% -*- Latex -*- \documentstyle[12pt]{report} % Here is a copy of the Stanford thesis style. % suthesis.sty % $Header: /u1/pallas/tex/suthesis.sty,v 1.2 89/02/22 09:37:58 pallas Exp $ % Last edit by Joseph Pallas \typeout{Document Style Option `suthesis' <$Date: 89/02/22 09:37:58 $>.} \catcode`\@=11 \@ifundefined{chapter}{\@latexerr{The `suthesis' option should be used with the `report' document style}{You should probably read the suthesis documentation.}}{} \oddsidemargin 0.5in \evensidemargin 0in \marginparwidth 40pt \marginparsep 10pt \topmargin 0pt \headsep .5in \textheight 8.1in \textwidth 6in \brokenpenalty=10000 \renewcommand{\baselinestretch}{1.3} \def\@xfloat#1[#2]{\ifhmode \@bsphack\@floatpenalty -\@Mii\else \@floatpenalty-\@Miii\fi\def\@captype{#1}\ifinner \@parmoderr\@floatpenalty\z@ \else\@next\@currbox\@freelist{\@tempcnta\csname ftype@#1\endcsname \multiply\@tempcnta\@xxxii\advance\@tempcnta\sixt@@n \@tfor \@tempa :=#2\do {\if\@tempa h\advance\@tempcnta \@ne\fi \if\@tempa t\advance\@tempcnta \tw@\fi \if\@tempa b\advance\@tempcnta 4\relax\fi \if\@tempa p\advance\@tempcnta 8\relax\fi }\global\count\@currbox\@tempcnta}\@fltovf\fi \global\setbox\@currbox\vbox\bgroup \def\baselinestretch{1}\@normalsize \boxmaxdepth\z@ \hsize\columnwidth \@parboxrestore} \long\def\@footnotetext#1{\insert\footins{\def\baselinestretch{1}\footnotesize \interlinepenalty\interfootnotelinepenalty \splittopskip\footnotesep \splitmaxdepth \dp\strutbox \floatingpenalty \@MM \hsize\columnwidth \@parboxrestore \edef\@currentlabel{\csname p@footnote\endcsname\@thefnmark}\@makefntext {\rule{\z@}{\footnotesep}\ignorespaces #1\strut}}} \def\dept#1{\gdef\@dept{#1}} \def\principaladviser#1{\gdef\@principaladviser{#1}} \def\advis@r{Adviser} \def\principaladvisor#1{\gdef\@principaladviser{#1}\gdef\advis@r{Advisor}} \def\firstreader#1{\gdef\@firstreader{#1}} \def\secondreader#1{\gdef\@secondreader{#1}} \def\submitdate#1{\gdef\@submitdate{#1}} \def\copyrightyear#1{\gdef\@copyrightyear{#1}} % \author, \title in report \def\@title{}\def\@author{}\def\@dept{computer science} \def\@principaladviser{}\def\@firstreader{}\def\@secondreader{} \def\@submitdate{\ifcase\the\month\or January\or February\or March\or April\or May\or June\or July\or August\or September\or October\or November\or December\fi \space \number\the\year} \ifnum\month=12 \@tempcnta=\year \advance\@tempcnta by 1 \edef\@copyrightyear{\number\the\@tempcnta} \else \def\@copyrightyear{\number\the\year} \newif\ifcopyright \newif\iffigurespage \newif\iftablespage \copyrighttrue \figurespagetrue \tablespagetrue \def\titlep{% \thispagestyle{empty}% \null\vskip1in% \begin{center} \Large\uppercase\expandafter{\@title} \end{center} \vfill \begin{center} \sc a dissertation\\ submitted to the department of \lowercase\expandafter{\@dept}\\ and the committee on graduate studies\\ of stanford university\\ in partial fulfillment of the requirements\\ for the degree of\\ doctor of philosophy \end{center} \vfill \begin{center} \rm By\\ \@author\\ \@submitdate\\ \end{center}\vskip.5in\newpage} \def\copyrightpage{% \null\vfill \begin{center} \large \copyright\ Copyright \@copyrightyear\ by \@author\\ All Rights Reserved \end{center} \vfill\newpage} \long\def\signature#1{% \begin{center} \begin{minipage}{4in} \parindent=0pt I certify that I have read this dissertation and that in my opinion it is fully adequate, in scope and in quality, as a dissertation for the degree of Doctor of Philosophy. 
\vspace{.5in} \hbox to 4in{\hfil\shortstack{\vrule width 3in height 0.4pt\\#1}} \end{minipage} \end{center}} \def\signaturepage{% \signature{\@principaladviser\\(Principal \advis@r)} \vfill \signature\@firstreader \vfill \signature\@secondreader \vfill \begin{center} \begin{minipage}{4in} Approved for the University Committee on Graduate Studies:\par \vspace{.5in} \hbox to 4in{\hfil\vrule width 3in height 0.4pt} \end{minipage} \end{center}} \def\beforepreface{ \pagenumbering{roman} \pagestyle{plain} \titlep \ifcopyright\copyrightpage\fi \signaturepage} \def\prefacesection#1{% \chapter*{#1} \addcontentsline{toc}{chapter}{#1}} \def\afterpreface{\newpage \tableofcontents \newpage \iftablespage \listoftables \newpage \iffigurespage \listoffigures \newpage \pagenumbering{arabic} \pagestyle{headings}} \let\@ldthebibliography\thebibliography \renewcommand{\thebibliography}[1]{\newpage \addcontentsline{toc}{chapter}{Bibliography} \@ldthebibliography{#1}} \pagestyle{headings} \catcode`\@=12 \def\PROC#1{{\mbox{${\cal P}[{#1}]$}}} \def\PROCD#1{{\mbox{${\cal P}_d[{#1}]$}}} \def\PROCK#1{{\mbox{${\cal P}_k[{#1}]$}}} \def\PROCB#1{{\mbox{${\cal P}_{k\bot}[{#1}]$}}} \def\PROCBC#1{{\mbox{$\overline{{\cal P}_{k\bot}}[{#1}]$}}} \def\CPROC#1{{\mbox{${\cal P}^{\bullet}[{#1}]$}}} \def\CPROCBC#1{{\mbox{$\overline{{\cal P}_{k\bot}^{\bullet}}[{#1}]$}}} \def\nato{\buildrel{\cdot}\over{\to}} \def\cod{\hbox{\rm cod}} \def\id{\hbox{\rm id}} \def\colim{\hbox{\rm colim}} \def\strip{\let\sS} \font\eighttt=cmtt8 \scriptfont\ttfam=\eighttt \def\:#1{\hbox{\tt\ifcat\noexpand#1\relax \expandafter\strip\string#1\else\noexpand#1\fi}} \def\dsum{\mathbin{+\mkern-5mu+}} \def\dconc{\mathbin{\|\mkern-1mu|}} \def\conc{\mathbin{\|}} \def\cat{\mathbin{;}} \def\leomega{{{\le}\omega}} \newtheorem{definition}[theorem]{Definition} \makeatletter \def\@citex[#1]#2{\if@filesw\immediate\write\@auxout{\string\citation{#2}}\fi \def\@citea{}\@cite{\@for\@citeb:=#2\do {\@citea\def\@citea{,\penalty\@m\ }\@ifundefined {b@\@citeb}{{\bf ?}\@warning {Citation `\@citeb' on page \thepage \space undefined}}% \hbox{\csname b@\@citeb\endcsname}}}{#1}} \makeatother \long\def\comment#1{} \def\cAl#1#2{{\cal #2}} \def\maybemath{\ifmmode\def\mm{\relax}\else\def\mm{$}\fi} \def\%#1{{\maybemath \mm \ifcat\noexpand#1\relax \expandafter\cAl\string#1\else\cal\noexpand#1\fi\mm}} \def\strip{\let\sS} \def\_#1{{\bf \ifcat\noexpand#1\relax \expandafter\strip\string#1\else\noexpand#1\fi}} \def\:#1{\hbox{\tt\ifcat\noexpand#1\relax \expandafter\strip\string#1\else\noexpand#1\fi}} \newcount\greekno \def\^#1{{\maybemath \greekno=`#1\advance\greekno by -65\relax \ifcase\greekno A\or B\or \Psi\or \Delta\or E\or \Phi\or \Gamma\or H\or I\or J\or K\or \Lambda\or M\or N\or \Theta\or \Pi\or Q\or R\or \Sigma\or \Gamma\or \Upsilon\or V\or \Omega\or \Xi \else\advance\greekno by -32\relax \ifcase\greekno \alpha\or \beta\or \psi\or \delta\or \epsilon\or \phi\or \gamma\or \eta\or \iota\or \vartheta\or \kappa\or \lambda\or \mu\or \nu\or \theta\or \pi\or \varphi\or \rho\or \sigma\or \tau\or \upsilon\or \varsigma\or \omega\or \xi\or \varrho\or \zeta\else\errmessage{Bad \char`\% item}\fi \mm}} \def\implies{\Longrightarrow} \def\iff{\Longleftrightarrow} \def\R{{\mbox{${\bf\bar R}_+$}}} \def\N{{\mbox{\bf N}}} \def\Set{{\bf Set}} \def\Seti{{\bf Seti}} \def\Pos{{\bf Pos}} \def\Pros{{\bf Pros}} \def\Pom{{\bf Pom}} \def\Pomi{{\bf Pomi}} \def\Int{{\bf Int}} \def\SR{{\mathord{\bf SR}}} \def\hCat{{\mbox -}\_\Cat} \def\hPhys{{\mbox -}\_\Phys} \def\Z2{{\mbox{\bf Z$_2$}}} \def\DYN{{\mbox{\bf DYN}}} 
\def\TMP{{\mbox{\bf TMP}}} \def\comma{\mathop{\downarrow}} \def\vertex{\mathop{\rm vert}} \def\<#1>{\langle #1 \rangle} \def\op{^{\rm op}} \def\ob{{\rm ob}} \def\Tau{T} \def\qed{\vrule height6pt width4pt depth0pt} % end-of-proof etc. \newtheorem{theorem}{Theorem} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newenvironment{proof}{\medskip\noindent{\it Proof\/}:\quad}{\quad\qed\medskip} \newenvironment{proofl}[1]{\medskip\noindent{\it #1\/}:\quad}{\quad\qed\medskip} \newenvironment{proofo}{\medskip\noindent{\it Outline of Proof\/}:\quad}{\quad\qed\medskip} \message{} %%======================================================================% %% TeX macros for drawing rectangular category-theory diagrams % %% % %% Paul Taylor % %% % %% Department of Computing, Imperial College, London SW7 2BZ % %% +44 71 589 5111 x 5057 pt@doc.ic.ac.uk % %% % %% For user documentation, see "diagram-doc.tex" % %% % %% Contents: % %% (1) arrow commands (\rTo etc) % %% (2) bits of arrows (\rhvee etc) % %% After the first two sections (40%) the rest is intended to be % %% meaningless in the undocumented version. % %% % %% COPYRIGHT NOTICE: % %% This package may be copied and used freely for any academic % %% (not commercial or military) purpose, on condition that it % %% is not altered in any way, and that an acknowledgement is % %% included in any published paper using it. It is supplied % %% ``as is'' without warranty, express or implied. % %% % %% CORRUPTION WARNING: % %% BITNET (IBM) machines may corrupt certain important characters % %% in transmission by electronic mail: % %% {}=curly braces (grouping), \=backslash (keywords), % %% $=dollar (maths), %=percent (comment), &=and (alignment) % %% ^=caret (superscript), _=underline (subscript), ~=tidle % %% #=hash (argument), []=square brackets, |=vertical % %% % %% HISTORY % %% -- all following version numbers are post-facto -- % %% 3 (Jan 90) stretching vertical arrows % %% 2 (Sept 89) stretching horizontal arrows; ``superscript'' labels % %% 1 (1987) horiz arrows stretch to edge of cell % %% 0 (1986) implementation of Knuth's TeXercise 18.46 % %%======================================================================% %% user-settable parameters: \newdimen\DiagramCellHeight\DiagramCellHeight3em %% \newdimen\DiagramCellWidth\DiagramCellWidth3em %% \newdimen\MapBreadth\MapBreadth.04em %% \newdimen\MapShortFall\MapShortFall.4em %% \newdimen\PileSpacing\PileSpacing1em %% \def\labelstyle{\ifincommdiag\textstyle\else\scriptstyle\fi}%% \let\objectstyle\displaystyle %%======================================================================% %% % %% (1) ARROW COMMANDS % %% % %%======================================================================% \def\rTo{\HorizontalMap\empty-\empty-\rhvee}%% \def\lTo{\HorizontalMap\lhvee-\empty-\empty}%% \def\dTo{\VerticalMap\empty|\empty|\dhvee}%% \def\uTo{\VerticalMap\uhvee|\empty|\empty}%% \let\uFrom\uTo\let\lFrom\lTo \def\rArr{\HorizontalMap\empty-\empty-\rhla}%% \def\lArr{\HorizontalMap\lhla-\empty-\empty}%% \def\dArr{\VerticalMap\empty|\empty|\dhla}%% \def\uArr{\VerticalMap\uhla|\empty|\empty} \def\rDotsto{\HorizontalMap\empty\hfdot\hfdot\hfdot\rhvee}%% \def\lDotsto{\HorizontalMap\lhvee\hfdot\hfdot\hfdot\empty}%% \def\dDotsto{\VerticalMap\empty\vfdot\vfdot\vfdot\dhvee}%% \def\uDotsto{\VerticalMap\uhvee\vfdot\vfdot\vfdot\empty}%% \let\uDotsfrom\uDotsto\let\lDotsfrom\lDotsto \def\rDashto{\HorizontalMap\empty\hfdash\hfdash\hfdash\rhvee}%% 
\def\lDashto{\HorizontalMap\lhvee\hfdash\hfdash\hfdash\empty}%% \def\dDashto{\VerticalMap\empty\vfdash\vfdash\vfdash\dhvee}%% \def\uDashto{\VerticalMap\uhvee\vfdash\vfdash\vfdash\empty}%% \let\uDashfrom\uDashto\let\lDashfrom\lDashto \def\rImplies{\HorizontalMap==\empty=\Rightarrow}%% \def\lImplies{\HorizontalMap\Leftarrow=\empty==}%% \def\dImplies{\VerticalMap\|\|\empty\|\Downarrow}%% \def\uImplies{\VerticalMap\Uparrow\|\empty\|\|}%% \let\uImpliedby\uImplies\let\lImpliedby\lImplies \def\rMapsto{\HorizontalMap\rtbar-\empty-\rhvee}%% \def\lMapsto{\HorizontalMap\lhvee-\empty-\ltbar}%% \def\dMapsto{\VerticalMap\dtbar|\empty|\dhvee}%% \def\uMapsto{\VerticalMap\uhvee|\empty|\utbar}%% \let\uMapsfrom\uMapsto\let\lMapsfrom\lMapsto \def\rIntoA{\HorizontalMap\rthooka-\empty-\rhvee}%% \def\rIntoB{\HorizontalMap\rthookb-\empty-\rhvee}%% \def\lIntoA{\HorizontalMap\lhvee-\empty-\lthooka}%% \def\lIntoB{\HorizontalMap\lhvee-\empty-\lthookb}%% \def\dIntoA{\VerticalMap\dthooka|\empty|\dhvee}%% \def\dIntoB{\VerticalMap\dthookb|\empty|\dhvee}%% \def\uIntoA{\VerticalMap\uhvee|\empty|\uthooka}%% \def\uIntoB{\VerticalMap\uhvee|\empty|\uthookb}%% \let\uInfromA\uIntoA\let\uInfromB\uIntoB\let\lInfromA\lIntoA\let\lInfromB \lIntoB\let\rInto\rIntoA\let\lInto\lIntoA\let\dInto\dIntoB\let\uInto\uIntoA \def\rEmbed{\HorizontalMap\gt-\empty-\rhvee}%% \def\lEmbed{\HorizontalMap\lhvee-\empty-\lt}%% \def\dEmbed{\VerticalMap\vee|\empty|\dhvee}%% \def\uEmbed{\VerticalMap\uhvee|\empty|\wedge} \def\rProject{\HorizontalMap\empty-\empty-\triangleright}%% \def\lProject{\HorizontalMap\triangleleft-\empty-\empty}%% \def\uProject{\VerticalMap\triangleup|\empty|\empty}%% \def\dProject{\VerticalMap\empty|\empty|\littletriangledown} \def\rOnto{\HorizontalMap\empty-\empty-\twoheadrightarrow}%% \def\lOnto{\HorizontalMap\twoheadleftarrow-\empty-\empty}%% \def\dOnto{\VerticalMap\empty|\empty|\twoheaddownarrow}%% \def\uOnto{\VerticalMap\twoheaduparrow|\empty|\empty}%% \let\lOnfrom\lOnto\let\uOnfrom\uOnto \def\hEq{\HorizontalMap==\empty==}%% \def\vEq{\VerticalMap\|\|\empty\|\|}%% \let\rEq\hEq\let\lEq\hEq\let\uEq\vEq\let\dEq\vEq \def\hLine{\HorizontalMap\empty-\empty-\empty}%% \def\vLine{\VerticalMap\empty|\empty|\empty}%% \let\rLine\hLine\let\lLine\hLine\let\uLine\vLine\let\dLine\vLine \def\hDots{\HorizontalMap\empty\hfdot\hfdot\hfdot\empty}%% \def\vDots{\VerticalMap\empty\vfdot\vfdot\vfdot\empty}%% \let\rDots\hDots\let\lDots\hDots\let\uDots\vDots\let\dDots\vDots \def\hDashes{\HorizontalMap\empty\hfdash\hfdash\hfdash\empty}%% \def\vDashes{\VerticalMap\empty\vfdash\vfdash\vfdash\empty}%% \let\rDashes\hDashes\let\lDashes\hDashes\let\uDashes\vDashes\let\dDashes \vDashes \def\rPto{\HorizontalMap\empty-\empty-\rightharpoonup}%% \def\lPto{\HorizontalMap\leftharpoonup-\empty-\empty}%% \def\uPto{\VerticalMap\upharpoonright|\empty|\empty}%% \def\dPto{\VerticalMap\empty|\empty|\downharpoonright}%% \let\lPfrom\lPto\let\uPfrom\uPto \def\NW{\NorthWest\DiagonalMap{\lah111}{\laf100}{}{\laf100}{}(2,2)}%% \def\NE{\NorthEast\DiagonalMap{\lah22}{\laf0}{}{\laf0}{}(2,2)}%% \def\SW{\SouthWest\DiagonalMap{}{\laf0}{}{\laf0}{\lah11}(2,2)}%% \def\SE{\SouthEast\DiagonalMap{}{\laf100}{}{\laf100}{\lah122}(2,2)} \def\nNW{\NorthWest\DiagonalMap{\lah135}{\laf112}{}{\laf112}{}(2,3)}%% \def\nNE{\NorthEast\DiagonalMap{\lah36}{\laf12}{}{\laf12}{}(2,3)}%% \def\sSW{\SouthWest\DiagonalMap{}{\laf12}{}{\laf12}{\lah35}(2,3)}%% \def\sSE{\SouthEast\DiagonalMap{}{\laf112}{}{\laf112}{\lah136}(2,3)} \def\wNW{\NorthWest\DiagonalMap{\lah153}{\laf121}{}{\laf121}{}(3,2)}%% 
\def\eNE{\NorthEast\DiagonalMap{\lah63}{\laf21}{}{\laf21}{}(3,2)}%% \def\wSW{\SouthWest\DiagonalMap{}{\laf21}{}{\laf21}{\lah53}(3,2)}%% \def\eSE{\SouthEast\DiagonalMap{}{\laf121}{}{\laf121}{\lah163}(3,2)} \def\NNW{\NorthWest\DiagonalMap{\lah113}{\laf101}{}{\laf101}{}(2,4)}%% \def\NNE{\NorthEast\DiagonalMap{\lah25}{\laf01}{}{\laf01}{}(2,4)}%% \def\SSW{\SouthWest\DiagonalMap{}{\laf01}{}{\laf01}{\lah13}(2,4)}%% \def\SSE{\SouthEast\DiagonalMap{}{\laf101}{}{\laf101}{\lah125}(2,4)} \def\WNW{\NorthWest\DiagonalMap{\lah131}{\laf110}{}{\laf110}{}(4,2)}%% \def\ENE{\NorthEast\DiagonalMap{\lah52}{\laf10}{}{\laf10}{}(4,2)}%% \def\WSW{\SouthWest\DiagonalMap{}{\laf10}{}{\laf10}{\lah31}(4,2)}%% \def\ESE{\SouthEast\DiagonalMap{}{\laf110}{}{\laf110}{\lah152}(4,2)} \def\NNNW{\NorthWest\DiagonalMap{\lah115}{\laf102}{}{\laf102}{}(2,6)}%% \def\NNNE{\NorthEast\DiagonalMap{\lah16}{\laf02}{}{\laf02}{}(2,6)}%% \def\SSSW{\SouthWest\DiagonalMap{}{\laf02}{}{\laf02}{\lah15}(2,6)}%% \def\SSSE{\SouthEast\DiagonalMap{}{\laf102}{}{\laf102}{\lah116}(2,6)} \def\WWNW{\NorthWest\DiagonalMap{\lah151}{\laf120}{}{\laf120}{}(6,2)}%% \def\EENE{\NorthEast\DiagonalMap{\lah61}{\laf20}{}{\laf20}{}(6,2)}%% \def\WWSW{\SouthWest\DiagonalMap{}{\laf20}{}{\laf20}{\lah51}(6,2)}%% \def\EESE{\SouthEast\DiagonalMap{}{\laf120}{}{\laf120}{\lah161}(6,2)} %%======================================================================% %% % %% (2) BITS OF ARROWS % %% % %%======================================================================% \font\tenln=line10 \mathchardef\lt="313C \mathchardef\gt="313E \def\rhvee{\mkern-10mu\gt}%% \def\lhvee{\lt\mkern-10mu}%% \def\dhvee{\vbox\tozpt{\vss\hbox{$\vee$}\kern0pt}}%% \def\uhvee{\vbox\tozpt{\hbox{$\wedge$}\vss}}%% \def\rhcvee{\mkern-10mu\succ}%% \def\lhcvee{\prec\mkern-10mu}%% \def\dhcvee{\vbox\tozpt{\vss\hbox{$\curlyvee$}\kern0pt}}%% \def\uhcvee{\vbox\tozpt{\hbox{$\curlywedge$}\vss}}%% \def\rhvvee{\mkern-10mu\gg}%% \def\lhvvee{\ll\mkern-10mu}%% \def\dhvvee{\vbox\tozpt{\vss\hbox{$\vee$}\kern-.6ex\hbox{$\vee$}\kern0pt}}%% \def\uhvvee{\vbox\tozpt{\hbox{$\wedge$}\kern-.6ex\hbox{$\wedge$}\vss}}%% \def\twoheaddownarrow{\rlap{$\downarrow$}\raise-.5ex\hbox{$\downarrow$}}%% \def\twoheaduparrow{\rlap{$\uparrow$}\raise.5ex\hbox{$\uparrow$}}%% \def\triangleup{{\scriptscriptstyle\bigtriangleup}}%% \def\littletriangledown{{\scriptscriptstyle\triangledown}}%% \def\rhla{\vbox\tozpt{\vss\hbox\tozpt{\hss\tenln\char'55}\kern\axisheight}}%% \def\lhla{\vbox\tozpt{\vss\hbox\tozpt{\tenln\char'33\hss}\kern\axisheight}}%% \def\dhla{\vbox\tozpt{\vss\hbox\tozpt{\tenln\char'77\hss}}}%% \def\uhla{\vbox\tozpt{\hbox\tozpt{\tenln\char'66\hss}\vss}}%% \def\htdot{\mkern3.15mu\cdot\mkern3.15mu}%% \def\vtdot{\vbox to 1.46ex{\vss\hbox{$\cdot$}}}%% \def\utbar{\vrule height 0.093ex depth0pt width 0.4em} \let\dtbar\utbar%% \def\rtbar{\mkern1.5mu\vrule height 1.1ex depth.06ex width .04em\mkern1.5mu}% \let\ltbar\rtbar%% \def\rthooka{\raise.603ex\hbox{$\scriptscriptstyle\subset$}}%% \def\lthooka{\raise.603ex\hbox{$\scriptscriptstyle\supset$}}%% \def\rthookb{\raise-.022ex\hbox{$\scriptscriptstyle\subset$}}%% \def\lthookb{\raise-.022ex\hbox{$\scriptscriptstyle\supset$}}%% \def\dthookb{\hbox{$\scriptscriptstyle\cap$}\mkern5.5mu}%% \def\uthookb{\hbox{$\scriptscriptstyle\cup$}\mkern4.5mu}%% \def\dthooka{\mkern6mu\hbox{$\scriptscriptstyle\cap$}}%% \def\uthooka{\mkern6mu\hbox{$\scriptscriptstyle\cup$}}%% \def\hfdot{\mkern3.15mu\cdot\mkern3.15mu}%% \def\vfdot{\vbox to 1.46ex{\vss\hbox{$\cdot$}}}%% \def\vfdashstrut{\vrule width0pt height1.3ex depth0.7ex}%% 
\def\vfthedash{\vrule width\MapBreadth height0.6ex depth 0pt}%% \def\hfthedash{\vrule\horizhtdp width 0.26em}%% \def\hfdash{\mkern5.5mu\hfthedash\mkern5.5mu}%% \def\vfdash{\vfdashstrut\vfthedash}%% \def\nwhTO{\nwarrow\mkern-1mu}%% \def\nehTO{\mkern-.1mu\nearrow}%% \def\sehTO{\searrow\mkern-.02mu}%% \def\swhTO{\mkern-.8mu\swarrow}%% \def\SEpbk{\rlap{\smash{\kern0.1em \vrule depth 2.67ex height -2.55ex width 0% .9em \vrule height -0.46ex depth 2.67ex width .05em }}}%% \def\SWpbk{\llap{\smash{\vrule height -0.46ex depth 2.67ex width .05em \vrule depth 2.67ex height -2.55ex width .9em \kern0.1em }}}%% \def\NEpbk{\rlap{\smash{\kern0.1em \vrule depth -3.48ex height 3.67ex width 0% .95em \vrule height 3.67ex depth -1.39ex width .05em }}}%% \def\NWpbk{\llap{\smash{\vrule height 3.6ex depth -1.39ex width .05em \vrule depth -3.48ex height 3.67ex width .95em \kern0.1em }}} %%======================================================================% \newcount\cdna\newcount\cdnb\newcount\cdnc\newcount\cdnd\cdna=\catcode`\@% \catcode`\@=11 \let\then\relax\def\loopa#1\repeat{\def\bodya{#1}\iteratea}% \def\iteratea{\bodya\let\next\iteratea\else\let\next\relax\fi\next}\def\loopb #1\repeat{\def\bodyb{#1}\iterateb}\def\iterateb{\bodyb\let\next\iterateb\else \let\next\relax\fi\next} \def\mapctxterr{\message{commutative diagram: map context error}}\def\mapclasherr{\message{commutative diagram: clashing maps}}% \def\ObsDim#1{\expandafter\message{! diagrams Warning: Dimension \string#1 is obsolete (ignored)}\global\let#1\ObsDimq\ObsDimq}\def\ObsDimq{\dimen@=}\def \HorizontalMapLength{\ObsDim\HorizontalMapLength}\def\VerticalMapHeight{% \ObsDim\VerticalMapHeight}\def\VerticalMapDepth{\ObsDim\VerticalMapDepth}\def \VerticalMapExtraHeight{\ObsDim\VerticalMapExtraHeight}\def \VerticalMapExtraDepth{\ObsDim\VerticalMapExtraDepth}\def\ObsCount#1{% \expandafter\message{! 
diagrams Warning: Count \string#1 is obsolete (ignored% )}\global\let#1\ObsCountq\ObsCountq}\def\ObsCountq{\count@=}\def \DiagonalLineSegments{\ObsCount\DiagonalLineSegments}\def\tozpt{to\z@}\def \sethorizhtdp{\dimen8=\axisheight\dimen9=\MapBreadth\advance\dimen8.5\dimen9% \advance\dimen9-\dimen8}\def\horizhtdp{height\dimen8 depth\dimen9 }\def \axisheight{\fontdimen22\the\textfont2 }\countdef\boxc@unt=14 \def\bombparameters{\hsize\z@\rightskip\z@ plus1fil minus\maxdimen \parfillskip\z@\linepenalty9000 \looseness0 \hfuzz\maxdimen\hbadness10000 \clubpenalty0 \widowpenalty0 \displaywidowpenalty0 \interlinepenalty0 \predisplaypenalty0 \postdisplaypenalty0 \interdisplaylinepenalty0 \interfootnotelinepenalty0 \floatingpenalty0 \brokenpenalty0 \everypar{}% \leftskip\z@\parskip\z@\parindent\z@\pretolerance10000 \tolerance10000 \hyphenpenalty10000 \exhyphenpenalty10000 \binoppenalty10000 \relpenalty10000 \adjdemerits0 \doublehyphendemerits0 \finalhyphendemerits0 \prevdepth\z@}\def \startbombverticallist{\hbox{}\penalty1\nointerlineskip} \def\pushh#1\to#2{\setbox#2=\hbox{\box#1\unhbox#2}}\def\pusht#1\to#2{\setbox#% 2=\hbox{\unhbox#2\box#1}} \newif\ifallowhorizmap\allowhorizmaptrue\newif\ifallowvertmap \allowvertmapfalse\newif\ifincommdiag\incommdiagfalse \def\diagram{\hbox\bgroup$\vcenter\bgroup\startbombverticallist \incommdiagtrue\baselineskip\DiagramCellHeight\lineskip\z@\lineskiplimit\z@ \mathsurround\z@\tabskip\z@\let\\\diagcr\allowhorizmaptrue\allowvertmaptrue \halign\bgroup\lcdtempl##\rcdtempl&&\lcdtempl##\rcdtempl\cr}\def\enddiagram{% \crcr\egroup\reformatmatrix\egroup$\egroup}\def\commdiag#1{{\diagram#1% \enddiagram}} \def\lcdtempl{\futurelet\thefirsttoken\dolcdtempl}\newif\ifemptycell\def \dolcdtempl{\ifx\thefirsttoken\rcdtempl\then\hskip1sp plus 1fil \emptycelltrue \else\hfil$\emptycellfalse\objectstyle\fi}\def\rcdtempl{\ifemptycell\else$% \hfil\fi}\def\diagcr{\cr} \def\across#1{\span\omit\mscount=#1 \loop\ifnum \mscount>2 \spAn\repeat\ignorespaces}\def\spAn{\relax\span\omit\advance \mscount by -1} \def\CellSize{\afterassignment\cdhttowd\DiagramCellHeight}\def\cdhttowd{% \DiagramCellWidth\DiagramCellHeight}\def\MapsAbut{\MapShortFall\z@} \newcount\cdvdl\newcount\cdvdr\newcount\cdvd\newcount\cdbfb\newcount\cdbfr \newcount\cdbfl\newcount\cdvdr\newcount\cdvdl\newcount\cdvd \def\reformatmatrix{\bombparameters\cdvdl=\insc@unt\cdvdr=\cdvdl\cdbfb=% \boxc@unt\advance\cdbfb1 \cdbfr=\cdbfb\setbox1=\vbox{}\dimen2=\z@\loop\setbox 0=\lastbox\ifhbox0 \dimen1=\lastskip\unskip\dimen5=\ht0 \advance\dimen5 \dimen 1 \dimen4=\dp0 \penalty1 \reformatrow\unpenalty\ht4=\dimen5 \dp4=\dimen4 \ht3% \z@\dp3\z@\setbox1=\vbox{\box4 \nointerlineskip\box3 \nointerlineskip\unvbox1% }\dimen2=\dimen1 \repeat\unvbox1} \newif\ifcontinuerow \def\reformatrow{\cdbfl=\cdbfr\noindent\unhbox0 \loopa\unskip\setbox\cdbfl=% \lastbox\ifhbox\cdbfl\advance\cdbfl1\repeat\par\unskip\dimen6=2% \DiagramCellWidth\dimen7=-\DiagramCellWidth\setbox3=\hbox{}\setbox4=\hbox{}% \setbox7=\box\voidb@x\cdvd=\cdvdl\continuerowtrue\loopa\advance\cdvd-1 \adjustcells\ifcontinuerow\advance\dimen6\wd\cdbfl\cdda=.5\dimen6 \ifdim\cdda <\DiagramCellWidth\then\dimen6\DiagramCellWidth\advance\dimen6-\cdda \nopendvert\cdda\DiagramCellWidth\fi\advance\dimen7\cdda\dimen6=\wd\cdbfl \reformatcell\advance\cdbfl-1 \repeat\advance\dimen7.5\dimen6 \outHarrow} \def \adjustcells{\ifnum\cdbfr>\cdbfl\then\ifnum\cdvdr>\cdvd\then\continuerowfalse \else\setbox\cdbfl=\hbox to\wd\cdvd{\lcdtempl\VonH{}\rcdtempl}\fi\else\ifnum \cdvdr>\cdvd\then\advance\cdvdr-1 
\setbox\cdvd=\vbox{}\wd\cdvd=\wd\cdbfl\dp \cdvd=\dp1 \fi\fi} \def\reformatcell{\sethorizhtdp\noindent\unhbox\cdbfl\skip0=\lastskip\unskip \par\ifcase\prevgraf\reformatempty\or\reformatobject\else\reformatcomplex\fi \unskip}\def\reformatobject{\setbox6=\lastbox\unskip\vadjdon6\outVarrow \setbox6=\hbox{\unhbox6}\advance\dimen7-.5\wd6 \outHarrow\dimen7=-.5\wd6 \pusht6\to4}\newcount\globnum \def\reformatcomplex{\setbox6=\lastbox\unskip\setbox9=\lastbox\unskip\setbox9% =\hbox{\unhbox9 \skip0=\lastskip\unskip\global\globnum\lastpenalty\hskip\skip 0 }\advance\globnum9999 \ifcase\globnum\reformathoriz\or\reformatpile\or \reformatHonV\or\reformatVonH\or\reformatvert\or\reformatHmeetV\fi} \def\reformatempty{\vpassdon\ifdim\skip0>\z@\then\hpassdon\else\ifvoid2 \then \else\advance\dimen7-.5\dimen0 \cdda=\wd2\advance\cdda.5\dimen0\wd2=\cdda\fi \fi}\def\VonH{\doVonH6}\def\HonV{\doVonH7}\def\HmeetV{\MapBreadth-2% \MapShortFall\doVonH4}\def\doVonH#1{\cdna-999#1\futurelet\thenexttoken \dooVonH}\def\dooVonH{\let\next\relax\sethorizhtdp\ifallowhorizmap \ifallowvertmap\then\ifx\thenexttoken[\then\let\next\VonHstrut\else \sethorizhtdp\dimen0\MapBreadth\let\next\VonHnostrut\fi\else\mapctxterr\fi \else\mapctxterr\fi\next}\def\VonHstrut[#1]{\setbox0=\hbox{$#1$}\dimen0\wd0% \dimen8\ht0\dimen9\dp0 \VonHnostrut}\def\VonHnostrut{\setbox0=\hbox{}\ht0=% \dimen8\dp0=\dimen9\wd0=.5\dimen0 \copy0\penalty\cdna\box0 \allowhorizmapfalse \allowvertmapfalse}\def\reformatHonV{\hpassdon\doreformatHonV}\def \reformatHmeetV{\dimen@=\wd9 \advance\dimen7-\wd9 \outHarrow\setbox6=\hbox{% \unhbox6}\dimen7-\wd6 \advance\dimen@\wd6 \setbox6=\hbox to\dimen@{\hss}% \pusht6\to4\doreformatHonV}\def\doreformatHonV{\setbox9=\hbox{\unhbox9 \unskip \unpenalty\global\setbox\globbox=\lastbox}\vadjdon\globbox\outVarrow}\def \reformatVonH{\vpassdon\advance\dimen7-\wd9 \outHarrow\setbox6=\hbox{\unhbox6% }\dimen7=-\wd6 \setbox6=\hbox{\kern\wd9 \kern\wd6}\pusht6\to4}\def\hpassdon{}% \def\vpassdon{\dimen@=\dp\cdvd\advance\dimen@\dimen4 \advance\dimen@\dimen5 \dp\cdvd=\dimen@\nopendvert}\def\vadjdon#1{\dimen8=\ht#1 \dimen9=\dp#1 } \def\HorizontalMap#1#2#3#4#5{\sethorizhtdp\setbox1=\makeharrowpart{#1}\def \arrowfillera{#2}\def\arrowfillerb{#4}\setbox5=\makeharrowpart{#5}\ifx \arrowfillera\justhorizline\then\def\arra{\hrule\horizhtdp}\def\kea{\kern-0.% 01em}\let\arrstruthtdp\horizhtdp\else\def\kea{\kern-0.15em}\setbox2=\hbox{% \kea${\arrowfillera}$\kea}\def\arra{\copy2}\def\arrstruthtdp{height\ht2 depth% \dp2 }\fi\ifx\arrowfillerb\justhorizline\then\def\arrb{\hrule\horizhtdp}\def \keb{kern-0.01em}\ifx\arrowfillera\empty\then\let\arrstruthtdp\horizhtdp\fi \else\def\keb{\kern-0.15em}\setbox4=\hbox{\keb${\arrowfillerb}$\keb}\def\arrb {\copy4}\ifx\arrowfilera\empty\then\def\arrstruthtdp{height\ht4 depth\dp4 }% \fi\fi\setbox3=\makeharrowpart{{#3}\vrule width\z@\arrstruthtdp}% \ifallowhorizmap\then\let\execmap\execHorizontalMap\else\let\execmap \mapctxterr\fi\allowhorizmapfalse\gettwoargs}\def\makeharrowpart#1{\hbox{% \mathsurround\z@\edef\next{#1}\ifx\next\empty\else$\mkern-1.5mu{\next}\mkern-% 1.5mu$\fi}}\def\justhorizline{-} \def\execHorizontalMap{\dimen0=\wd6 \ifdim\dimen0<\wd7\then\dimen0=\wd7\fi \dimen3=\wd3 \ifdim\dimen0<2em\then\dimen0=2em\fi\skip2=.5\dimen0 \ifincommdiag plus 1fill\fi minus\z@\advance\skip2-.5\dimen3 \skip4=\skip2 \advance\skip2-\wd1 \advance\skip4-\wd5 \kern\MapShortFall\box1 \xleaders \arra\hskip\skip2 \vbox{\lineskiplimit\maxdimen\lineskip.5ex \ifhbox6 \hbox to% \dimen3 {\hss\box6\hss}\fi\vtop{\box3 \ifhbox7 \hbox to\dimen3 
{\hss\box7\hss }\fi}}\ifincommdiag\kern-.5\dimen3\penalty-9999\null\kern.5\dimen3\fi \xleaders\arrb\hskip\skip4 \box5 \kern\MapShortFall} \def\reformathoriz{\vadjdon6\outVarrow\ifvoid7\else\mapclasherr\fi\setbox2=% \box9 \wd2=\dimen7 \dimen7=\z@\setbox7=\box6 } \def\resetharrowpart#1#2{\ifvoid#1\then\ifdim#2=\z@\else\setbox4=\hbox{% \unhbox4\kern#2}\fi\else\ifhbox#1\then\setbox#1=\hbox to#2{\unhbox#1}\else \widenpile#1\fi\pusht#1\to4\fi}\def\outHarrow{\resetharrowpart2{\wd2}\pusht2% \to4\resetharrowpart7{\dimen7}\pusht7\to4\dimen7=\z@} \def\pile#1{{\incommdiagtrue\let\pile\innerpile\allowvertmapfalse \allowhorizmaptrue\baselineskip.5\PileSpacing\lineskip\z@\lineskiplimit\z@ \mathsurround\z@\tabskip\z@\let\\\pilecr\vcenter{\halign{\hfil$##$\hfil\cr#1 \crcr}}}\ifincommdiag\then\ifallowhorizmap\then\penalty-9998 \allowvertmapfalse\allowhorizmapfalse\else\mapctxterr\fi\fi}\def\pilecr{\cr}% \def\innerpile#1{\noalign{\halign{\hfil$##$\hfil\cr#1 \crcr}}} \def\reformatpile{\vadjdon9\outVarrow\ifvoid7\else\mapclasherr\fi\penalty1 \setbox9=\hbox{\unhbox9 \unskip\unpenalty\setbox9=\lastbox\unhbox9 \global \setbox\globbox=\lastbox}\unvbox\globbox\setbox9=\vbox{}\setbox7=\vbox{}% \loopb\setbox6=\lastbox\ifhbox6 \skip3=\lastskip\unskip\splitpilerow\repeat \unpenalty\setbox9=\hbox{$\vcenter{\unvbox9}$}\setbox2=\box9 \dimen7=\z@}\def \pilestrut{\vrule height\dimen0 depth\dimen3 width\z@}\def\splitpilerow{% \dimen0=\ht6 \dimen3=\dp6 \noindent\unhbox6\unskip\setbox6=\lastbox\unskip \unhbox6\par\setbox6=\lastbox\unskip\ifcase\prevgraf\or\setbox6=\hbox\tozpt{% \hss\unhbox6\hss}\ht6=\dimen0 \dp6=\dimen3 \setbox9=\vbox{\vskip\skip3 \hbox to\dimen7{\hfil\box6}\nointerlineskip\unvbox9}\setbox7=\vbox{\vskip\skip3 \hbox{\pilestrut\hfil}\nointerlineskip\unvbox7}\or\setbox7=\vbox{\vskip\skip3 \hbox{\pilestrut\unhbox6}\nointerlineskip\unvbox7}\setbox6=\lastbox\unskip \setbox9=\vbox{\vskip\skip3 \hbox to\dimen7{\pilestrut\unhbox6}% \nointerlineskip\unvbox9}\fi\unskip} \def\widenpile#1{\setbox#1=\hbox{$\vcenter{\unvbox#1 \setbox8=\vbox{}\loopb \setbox9=\lastbox\ifhbox9 \skip3=\lastskip\unskip\setbox8=\vbox{\vskip\skip3 \hbox to\dimen7{\unhbox9}\nointerlineskip\unvbox8}\repeat\unvbox8 }$}} \def\justverticalline{|}\def\makevarrowpart#1{\hbox to\MapBreadth{\hss$\kern \MapBreadth{#1}$\hss}}\def\VerticalMap#1#2#3#4#5{\setbox1=\makevarrowpart{#1}% \def\arrowfillera{#2}\setbox3=\makevarrowpart{#3}\def\arrowfillerb{#4}\setbox 5=\makevarrowpart{#5}\ifx\arrowfillera\justverticalline\then\def\arra{\vrule width\MapBreadth}\def\kea{\kern-0.05ex}\else\def\kea{\kern-0.35ex}\setbox2=% \vbox{\kea\makevarrowpart\arrowfillera\kea}\def\arra{\copy2}\fi\ifx \arrowfillerb\justverticalline\then\def\arrb{\vrule width\MapBreadth}\def\keb {\kern-0.05ex}\else\def\keb{\kern-0.35ex}\setbox4=\vbox{\keb\makevarrowpart \arrowfillerb\keb}\def\arrb{\copy4}\fi\ifallowvertmap\then\let\execmap \execVerticalMap\else\let\execmap\mapctxterr\fi\allowhorizmapfalse\gettwoargs \def\execVerticalMap{\setbox3=\makevarrowpart{\box3}\setbox0=\hbox{}\ht0=\ht3 \dp0\z@\ht3\z@\box6 \setbox8=\vtop spread2ex{\offinterlineskip\box3 \xleaders \arrb\vfill\box5 \kern\MapShortFall}\dp8=\z@\box8 \kern-\MapBreadth\setbox8=% \vbox spread2ex{\offinterlineskip\kern\MapShortFall\box1 \xleaders\arra\vfill \box0}\ht8=\z@\box8 \ifincommdiag\then\kern-.5\MapBreadth\penalty-9995 \null \kern.5\MapBreadth\fi\box7\hfil} \newcount\colno\newdimen\cdda\newbox\globbox\def\reformatvert{\setbox6=\hbox{% \unhbox6}\cdda=\wd6 \dimen3=\dp\cdvd\advance\dimen3\dimen4 \setbox\cdvd=\hbox 
{}\colno=\prevgraf\advance\colno-2 \loopb\setbox9=\hbox{\unhbox9 \unskip \unpenalty\dimen7=\lastkern\unkern\global\setbox\globbox=\lastbox\advance \dimen7\wd\globbox\advance\dimen7\lastkern\unkern\setbox9=\lastbox\vtop to% \dimen3{\unvbox9}\kern\dimen7 }\ifnum\colno>0 \ifdim\wd9<\PileSpacing\then \setbox9=\hbox to\PileSpacing{\unhbox9}\fi\dimen0=\wd9 \advance\dimen0-\wd \globbox\setbox\cdvd=\hbox{\kern\dimen0 \box\globbox\unhbox\cdvd}\pushh9\to6% \advance\colno-1 \setbox9=\lastbox\unskip\repeat\advance\dimen7-.5\wd6 \advance\dimen7.5\cdda\advance\dimen7-\wd9 \outHarrow\dimen7=-.5\wd6 \advance \dimen7-.5\cdda\pusht9\to4\pusht6\to4\nopendvert\dimen@=\dimen6\advance \dimen@-\wd\cdvd\advance\dimen@-\wd\globbox\divide\dimen@2 \setbox\cdvd=\hbox {\kern\dimen@\box\globbox\unhbox\cdvd\kern\dimen@}\dimen8=\dp\cdvd\advance \dimen8\dimen5 \dp\cdvd=\dimen8 \ht\cdvd=\z@} \def\outVarrow{\ifhbox\cdvd\then\deepenbox\cdvd\pusht\cdvd\to3\else \nopendvert\fi\dimen3=\dimen5 \advance\dimen3-\dimen8 \setbox\cdvd=\vbox{% \vfil}\dp\cdvd=\dimen3} \def\nopendvert{\setbox3=\hbox{\unhbox3\kern\dimen6}}% \def\deepenbox\cdvd{\setbox\cdvd=\hbox{\dimen3=\dimen4 \advance\dimen3-\dimen 9 \setbox6=\hbox{}\ht6=\dimen3 \dp6=-\dimen3 \dimen0=\dp\cdvd\advance\dimen0% \dimen3 \unhbox\cdvd\dimen3=\lastkern\unkern\setbox8=\hbox{\kern\dimen3}% \loopb\setbox9=\lastbox\ifvbox9 \setbox9=\vtop to\dimen0{\copy6 \nointerlineskip\unvbox9 }\dimen3=\lastkern\unkern\setbox8=\hbox{\kern\dimen3% \box9\unhbox8}\repeat\unhbox8 }} \newif\ifPositiveGradient\PositiveGradienttrue\newif\ifClimbing\Climbingtrue \newcount\DiagonalChoice\DiagonalChoice1 \newcount\lineno\newcount\rowno \newcount\charno\def\laf{\afterassignment\xlaf\charno='}\def\xlaf{\hbox{% \tenln\char\charno}}\def\lah{\afterassignment\xlah\charno='}\def\xlah{\hbox{% \tenln\char\charno}}\def\makedarrowpart#1{\hbox{\mathsurround\z@${#1}$}}\def \lad{\afterassignment\xlad\charno='}\def\xlad{\setbox2=\xlaf\setbox0=\hbox to% .5\wd2{$\hss\ldot\hss$}\ht0=.25\ht2 \dp0=\ht0 \hbox{\mv-\ht0\copy0 \mv\ht0% \box0}} \def\DiagonalMap#1#2#3#4#5{\ifPositiveGradient\then\let\mv\raise\else\let\mv \lower\fi\setbox2=\makedarrowpart{#2}\setbox1=\makedarrowpart{#1}\setbox4=% \makedarrowpart{#4}\setbox5=\makedarrowpart{#5}\setbox3=\makedarrowpart{#3}% \let\execmap\execDiagonalLine\gettwoargs} \def\makeline#1(#2,#3;#4){\hbox{\dimen1=#2\relax\dimen2=#3\relax\dimen5=#4% \relax\vrule height\dimen5 depth\z@ width\z@\setbox8=\hbox to\dimen1{\tenln#1% \hss}\cdna=\dimen5 \divide\cdna\dimen2 \ifnum\cdna=0 \then\box8 \else\dimen4=% \dimen5 \advance\dimen4-\dimen2 \divide\dimen4\cdna\dimen3=\dimen1 \cdnb=% \dimen2 \divide\cdnb1000 \divide\dimen3\cdnb\cdnb=\dimen4 \divide\cdnb1000 \multiply\dimen3\cdnb\dimen6\dimen1 \advance\dimen6-\dimen3 \cdnb=0 \ifPositiveGradient\then\dimen7\z@\else\dimen7\cdna\dimen4 \multiply\dimen4-1 \fi\loop\raise\dimen7\copy8 \ifnum\cdnb<\cdna\hskip-\dimen6 \advance\cdnb1 \advance\dimen7\dimen4 \repeat\fi}}\newdimen\objectheight\objectheight1.5ex \def\execDiagonalLine{\setbox0=\hbox\tozpt{\cdna=\xcoord\cdnb=\ycoord\dimen8=% \wd2 \dimen9=\ht2 \dimen0=\cdnb\DiagramCellHeight\advance\dimen0-2% \MapShortFall\advance\dimen0-\objectheight\setbox2=\makeline\box2(\dimen8,% \dimen9;.5\dimen0)\setbox4=\makeline\box4(\dimen8,\dimen9;.5\dimen0)\dimen0=2% \wd2 \advance\dimen0-\cdna\DiagramCellWidth\advance\dimen0 2\DiagramCellWidth \dimen2\DiagramCellHeight\advance\dimen2-\MapShortFall\dimen1\dimen2 \advance \dimen1-\ht1 \advance\dimen2-\ht2 \dimen6=\dimen2 \advance\dimen6.25\dimen8 \dimen3\dimen2 
\advance\dimen3-\ht3 \dimen4=\dimen2 \dimen7=\dimen2 \advance \dimen4-\ht4 \advance\dimen7-\ht7 \advance\dimen7-.25\dimen8 \ifPositiveGradient\then\hss\raise\dimen4\hbox{\rlap{\box5}\box4}\llap{\raise \dimen6\box6\kern.25\dimen9}\else\kern-.5\dimen0 \rlap{\raise\dimen1\box1}% \raise\dimen2\box2 \llap{\raise\dimen7\box7\kern.25\dimen9}\fi\raise\dimen3% \hbox\tozpt{\hss\box3\hss}\ifPositiveGradient\then\rlap{\kern.25\dimen9\raise \dimen7\box7}\raise\dimen2\box2\llap{\raise\dimen1\box1}\kern-.5\dimen0 \else \rlap{\kern.25\dimen9\raise\dimen6\box6}\raise\dimen4\hbox{\box4\llap{\box5}}% \hss\fi}\ht0\z@\dp0\z@\box0} \def\NorthWest{\PositiveGradientfalse\Climbingtrue\DiagonalChoice0 }\def \NorthEast{\PositiveGradienttrue\Climbingtrue\DiagonalChoice1 }\def\SouthWest {\PositiveGradienttrue\Climbingfalse\DiagonalChoice3 }\def\SouthEast{% \PositiveGradientfalse\Climbingfalse\DiagonalChoice2 } \newif\ifmoremapargs\def\gettwoargs{\setbox7=\box\voidb@x\setbox6=\box \voidb@x\moremapargstrue\def\whichlabel{6}\def\xcoord{2}\def\ycoord{2}\def \contgetarg{\def\whichlabel{7}\ifmoremapargs\then\let\next\getanarg\let \contgetarg\execmap\else\let\next\execmap\fi\next}\getanarg}\def\getanarg{% \futurelet\thenexttoken\switcharg}\def\getlabel#1#2#3{\setbox#1=\hbox{$% \labelstyle\>{#3}\>$}\dimen0=\ht#1\advance\dimen0 .4ex\ht#1=\dimen0 \dimen0=% \dp#1\advance\dimen0 .4ex\dp#1=\dimen0 \contgetarg}\def\eatspacerepeat{% \afterassignment\getanarg\let\junk= }\def\catcase#1:{{\ifcat\noexpand \thenexttoken#1\then\global\let\xcase\docase\fi}\xcase}\def\tokcase#1:{{\ifx \thenexttoken#1\then\global\let\xcase\docase\fi}\xcase}\def\default:{\docase}% \def\docase#1\esac#2\esacs{#1}\def\skipcase#1\esac{}\def\getcoordsrepeat(#1,#% 2){\def\xcoord{#1}\def\ycoord{#2}\getanarg}\let\esacs\relax\def\switcharg{% \global\let\xcase\skipcase\catcase{&}:\moremapargsfalse\contgetarg\esac \catcase\bgroup:\getlabel\whichlabel-\esac\catcase^:\getlabel6\esac\catcase_:% \getlabel7\esac\tokcase{~}:\getlabel3\esac\tokcase(:\getcoordsrepeat\esac \catcase{ }:\eatspacerepeat\esac\default:\moremapargsfalse\contgetarg\esac \esacs} % -- define eqalign and friends \catcode`\@=11 \newskip\c@nt@ring\c@nt@ring=0pt plus 1000pt minus 1000pt \def\eqalign#1{\,\vcenter{\openup1\jot \m@th \ialign{\strut\hfil$\displaystyle{##}$&$\displaystyle{{}##}$\hfil \crcr#1\crcr}}\,} \def\eqalignno#1{\displ@y \tabskip=\c@nt@ring \halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip=0pt &$\displaystyle{{}##}$\hfil\tabskip=\c@nt@ring &\llap{$##$}\tabskip=0pt\crcr#1\crcr}} \def\leqalignno#1{\displ@y \tabskip=\c@nt@ring \halign to\displaywidth{\hfil$\displaystyle{##}$\tabskip=0pt &$\displaystyle{{}##}$\hfil\tabskip=\c@nt@ring &\kern-\displaywidth\rlap{$##$}\tabskip=\displaywidth\crcr#1\crcr}} \catcode`\@=12 \widowpenalty=5000 \begin{document} \title{Metric Process Models} \author{Roger Frederick Crew} \principaladviser{Vaughan Pratt} \firstreader{John Mitchell} \secondreader{Jos\'e Meseguer\\(Center for Study of Language and Information)} \figurespagefalse \tablespagefalse \submitdate{November 1991} \beforepreface \begin{abstract} Among the various formal models proposed to provide semantics for concurrency constructs in programming languages, partial orders have the advantages of conceptual simplicity, mathematical tractability, and economy of expression. We first observe that the theory of enriched categories supplies a natural abstraction of the notion of partial order, the \%D-schedule. 
Varying the choice of temporal domain \%D allows for other forms of temporal constraint beyond that available from simple ordering. For example, having the constraints on inter-event delays be numeric bounds produces a generalized metric-space structure suitable for the discussion of real-time computation. We then construct an algebra of {\em processes} parametrized by the notion of time. Here a process is a structure based on schedules that also incorporates nondeterminism. Since the model is category-based, we can define operations on \%D-schedules and processes via universal constructions that depend little on the choice of \%D. Also, given a suitable choice of process structure and process morphism, the constructions used for process operations and schedule operations are remarkably similar.
\end{abstract}

\prefacesection{Acknowledgements}

I would first of all like to thank my advisor, Vaughan Pratt, whose ideas, inspirations and criticisms have contributed substantially to this work. Ross Casley and Jos\'e Meseguer, as collaborators, have also had a significant influence. Committee members John Mitchell and Robert Floyd provided useful comments. Conversations with various other people, including John Power, Mike Johnson, Phil Scott, Bard Bloom and Albert Meyer, have also been quite helpful. This research was supported in part by a National Science Foundation Graduate Fellowship.

And yes, I used Paul Taylor's commutative diagram macros. For that matter, I also used Don Knuth's \TeX, Leslie Lamport's \LaTeX, Supoj Sutanthavibul's {\tt fig} package, Richard Stallman's GNU Emacs and numerous other pieces of software without which this work would have taken much longer to produce. Nor would it be right to ignore the excellent facilities and support provided by Stanford's Computer Science Department and CSD-CF.

Lastly, I am profoundly grateful to my parents (not to mention the rest of my family for that matter) for the encouragement and support they have provided over the years.

\afterpreface

\prefacesection{Outline}

In Chapter \ref{chapter:tstruct}, we present the concept of schedule, a collection of events with temporal constraints, which is rooted in the various extant partial order models of concurrency \cite{Gre,Gra,NPW,MS,Pr82,PW}, particularly that of the labeled partial order or {\em pomset} model of Grabowski \cite{Gra} and Pratt \cite{Pr82}. Partial orders themselves are a sort of primaeval form of schedule having no further temporal information beyond some order constraints between events. We consider the similarities of the axioms of partial orders and metric spaces, the goal being to define abstract schedule operations that are applicable to all. This is done by having an appropriate notion of schedule map or {\em morphism} and then using various universal constructions from category theory. Much of the material presented in this chapter is actually joint work with Casley, Meseguer and Pratt, and as such is mainly developed elsewhere (see \cite{CCMP,Cas91}). It does, however, provide necessary groundwork for the process model presented in the following chapter.

Chapter \ref{chapter:proc} is the main focus of the thesis and is concerned with introducing choice or nondeterminism into the model. The initial vehicle for this is the disjunctive-normal-form (set of sets) representation of process that is inherent in the original partial order model and indeed in the trace theoretic model from which it derives.
Once the appropriate additional structure is determined and once we have an appropriate definition for {\em process} morphism, many of the categorical definitions for operations go through as for schedules. As an example, Chapter \ref{chapter:dflow} considers the application of this model to dataflow nets, recasting a result of Gaifman and Pratt \cite{GP} from partial orders to the more general setting of physical \%D-schedules.

\chapter{Temporal Structures}\label{chapter:tstruct}
\def\R{{\mbox{${\bf\bar R}^{op}$}}}
\def\of{\circ}

\section{Events and Constraints}

The most basic notion of {\em schedule} is that of a set of events together with some further collection of constraints on these events that determine when they are allowed to occur. At first glance, it may seem sufficient to refer to one's intuitive notion of an event, remarking that we leave our events uninterpreted; i.~e., the question of whether a particular event is a disk-read or a light bulb turning on is of no concern to us. However, it should be clarified that in our scheme, events are atomic or {\em instantaneous} entities. This becomes important when we generalize the partial order notion of time, with its vague notions of `before' and `after', to a metric notion with actual numerical delays. For example, if we have events corresponding to a light bulb turning on and a door closing, what we actually need are particular moments, e.~g., the instant in which the filament achieves two-thirds of its eventual brightness and the instant the door first contacts the doorjamb, in order to make meaningful a phrase like ``the door closes 5 seconds after the light goes on.''

It may seem unduly restrictive to insist that events be atomic when, in reality, almost none of the particular actions one wishes to model are actually atomic or instantaneous. However, in our usage, ``event'' means the referential moment rather than the full action. In our example, the closing of a door, which undoubtedly takes a few seconds to complete, is not an event {\em per se}. What usually happens, however, is that one is not concerned with the duration of the door-closing action, and so one merely chooses or takes as implicit an arbitrary moment during the swing of the door and identifies that event with the entire process of the door being closed. This interpretation makes it possible to consider ``refinement of events'' in a realm where events are atomic, since now one is not refining an event so much as refining the particular tasks or actions of which that event is a part. The result of a refinement is {\em not} a subdivided event, but rather the inclusion of several additional referential moments of a given task or action into our schedule. The events themselves remain instantaneous; we just see more of them.

We next consider the question of the form the constraints should take. Perhaps the coarsest notion of constraint on a set of events is that of an ordering, namely that of a set of events, $E$, with a partial order relation $\le$ constraining the temporal order in which these events may occur. In this case $\le$ satisfies the usual axioms.
$$\eqalignno{&x\le x&(R)\cr
x\le y \land y\le z\implies &x\le z&(T)\cr
x\le y \land y\le x\implies &x = y&(A)\cr}$$
Recall the typical interpretation for $x\le y$, namely that of being an abbreviation for ($x$ occurs before $y$).

Here $\:p=(E,\^d)$ is a schedule in the previous sense, and $\^m:E\to\Sigma$ is a labeling map. $\^S\in\Set$ is referred to as the {\em alphabet}.
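For concreteness, consider a small example (the particular events and labels being arbitrary, of course). Take the alphabet $\^S=\{a,b\}$ and let $\:p$ have two events $e_1$ and $e_2$, constrained so that $e_1$ occurs before $e_2$ (in the metric setting this constraint would instead be a numeric bound on the delay from $e_1$ to $e_2$). With the labeling $\^m(e_1)=a$ and $\^m(e_2)=b$, the triple $\<\:p,\^m,\^S>$ is the labeled schedule describing an $a$-action followed by a $b$-action. Note that distinct events may carry the same label: two unordered events both labeled $a$ form a perfectly good labeled schedule, one that a bare set of labels could not describe.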
As with ordinary schedules, we can make a category out of these, here using the comma category construction \cite{Mac}.

\begin{definition}
Given categories $\%A,\%B,\%C$ and functors $V:\%A\to\%C, W:\%B\to\%C$, the {\em comma category} $(V\comma W)$ is the category for which
\begin{itemize}
\item objects are triples $\<a,b,\^g>$, where $a\in\ob(\%A)$, $b\in\ob(\%B)$ and $\^g:Va\to Wb$ in \%C.
\item morphisms are pairs $\<\^a:a\to a',\^b:b\to b'>$ for which the following diagram commutes:
$$\begin{diagram}
Va&\rTo^{V\^a}&Va'\\
\dTo^{\^g}&&\dTo^{\^g'}\\
Wb&\rTo^{W\^b}&Wb'\\
\end{diagram}$$
\item $1_{\<a,b,\^g>}=\<1_a,1_b>$, while $\<\^a':a'\to a'',\^b':b'\to b''>\circ\<\^a:a\to a',\^b:b\to b'>=\<\^a'\circ\^a:a\to a'',\^b'\circ\^b:b\to b''>$.
\end{itemize}
\end{definition}

Starting with a given category of ordinary schedules \%T, the corresponding category of labeled schedules is then $(V\comma 1_{\_\Set})$, where $V:\%T\to\_\Set$ is the forgetful functor taking an ordinary schedule to its underlying event set and $1_{\_\Set}:\_\Set\to\_\Set$ is the identity functor. If there is no ambiguity about the functors involved, we denote this $(\%T\comma\_\Set)$. A morphism of labeled schedules $\<\:p,\^m,\^S>\to\<\:q,\^n,\^G>$ is then a pair of maps $\<f,\^f>$ where $f:\:p\to\:q$ is an (unlabeled) schedule map, $\^f:\^S\to\^G$ is a map of alphabets, and
$$\begin{diagram}
V\:p&\rTo^{Vf}&V\:q\\
\dTo^{\^m}&&\dTo^{\^n}\\
\^S&\rTo^{\^f}&\^G\\
\end{diagram}$$
commutes. Operations that are defined via universal constructions (e.~g., concurrence or concatenation) follow automatically from the definition of morphism. \cite{CCMP} discusses these at length and in any case they are not qualitatively different from the corresponding operations already described for unlabeled schedules. Carrying out this construction using $\%T=\_\Pos$ yields the category $\_\Pom$ of labeled partial orders (pomsets).

Of interest, however, are those operations arising from label manipulations. For the rest of this section, \%B refers to a category of labeled schedules arising from the comma construction on some category \%T of ordinary schedules, be it \_\Pos, {\%D\hCat} or {\%D\hPhys} for some temporal domain \%D. For any particular alphabet $\^S\in\Set$, we can find within $\%B$ the category $\%B_{\^S}$ of behaviors over that alphabet. $\%B_{\^S}$ is actually $(V\comma\^S)$, where $\^S:\_1\to\_\Set$ is the functor taking the single object of \_1 to the set $\^S$. Now, for any alphabet translation $t:\^G\to\^S$ there are two canonical functors we can define:
\begin{itemize}
\item The {\em renaming} $t_*:\%B_{\^G}\to\%B_{\^S}$, which takes a behavior $q=\<\:q,\nu,\^G>$ and relabels all of its events to produce $\<\:q,t\nu,\^S>$.
$$\commdiag{V\:q&\hEq&V(t_*\:q)\cr
\dTo{}{\nu}&&\dTo{}{t\nu}\cr
\^G&\rTo{t}{}&\^S\cr}$$
\item The {\em restriction} $t^*:\%B_{\^S}\to\%B_{\^G}$, which, in the case of $t$ being a monomorphism, takes $p=\<\:p,\mu,\^S>$ to $\<\:p\cap\mu^{-1}t(\^G),t^{-1}\mu,\^G>$, that is, we take $t^*p$ to be that subbehavior of $p$ whose events are those with labels in $t(\^G)$. If, in fact, the underlying temporal structure category $\%T$ {\em is} $\%D\hCat$, which is actually complete, we can instead obtain the unlabeled schedule $t^*\:p$ by taking the pullback
$$\begin{diagram}
t^*\:p&\rTo{}{}&\:p\\
\dTo{}{}&&\dTo{}{\mu}\\
E\^G&\rTo{Et}{}&E\^S\\
\end{diagram}$$
Here, $E:\Set\to\%T$ is the right adjoint to {\%T}'s forgetful functor (e.~g., the clique functor in the case of preorders). That this is an equivalent construction is proven in \cite{CCMP}.
\end{itemize}
The projection $\^s:\%B\to\Set$ (taking any behavior $\<\:p,\mu,\^S>$ to its alphabet $\^S$) is in fact a {\it fibration} \cite{J83}. For a more detailed discussion of this in the context of labeled Petri nets, see \cite{Win88}. In the case of general {\%T} where $t$ is not injective, it is not necessarily the case that $t^*$ exists. For our purposes, it will suffice to note that for any $t$ for which $t_*$ and $t^*$ both exist, we have that $t^*$ is a right adjoint of $t_*$, which is actually a stricter condition.
\comment{ In fact, in $\Pom$, $t^*$ never exists if $t$ is not injective. This is one of a number of points of grief resulting from the fact that the category of posets is actually a subcategory of the {*** why we prefer Prom to Pom} }

In general, we consider ourselves to have been given a category $\%B$, understood to be some $(\mbox{\%T}\comma\^S)$ where $\%T$ is a suitable subcategory of \%D\hCat. The objects of $\%B$ are behaviors, and its morphisms can be interpreted as relating behaviors to component behaviors (as above). This category is equipped with a projection $\^s:\%B\to\Set$ (giving the alphabet of actions associated with any given behavior) and for each alphabet $\^S$ we also have the fibre category $\%B_{\^S}$ consisting of behaviors of {\%B} over that particular alphabet, or alternatively, the inverse image under $\^s$ of the object $\^S$ and its identity morphism. We also assume for any $t:\^G\to\^S$ the existence of the translation functor ($t_*:\%B_{\^G}\to\%B_{\^S}$) and, in the cases where we need it, the existence of the restriction functor ($t^*:\%B_{\^S}\to\%B_{\^G}$), accompanied by an adjunction $t_*\dashv t^*$.

Given our concern about colimits, we should note that if $\%T$ is almost cocomplete and its forgetful functor preserves colimits, then $\%B$ is also almost cocomplete\footnote {In the case of $\Pom$, this result is not applicable, but we still have cocompleteness. However, colimits in {\Pom} do not respect the alphabets, that is, $\^s:\Pom\to\Set$ does not preserve colimits. For this reason we prefer, when considering partial orders, to either
\begin{enumerate}
\item think of them as preorders, i.~e., work in \_2\hCat, allowing for the possibility of distinct simultaneous events, or
\item interpret the order relations as being strict, i.~e., work in $\Pom_{<}=(\Pos_{<}\comma\Set)$,
\end{enumerate}
and avoid using {\Pom} altogether.}. Likewise if $\%T$ is complete, then so is $\%B$. One can then go on to define some basic behaviors and behavior operations using colimits, limits, restrictions, and translations. Another source of operations is the lifting of the symmetric monoidal structure $(\otimes,I)$ from {\%D} to {\%D\hCat} and thence to {\%B}, providing both of the latter categories with a $\otimes$ ($\otimes_{\%B}$ is orthocurrence as in \cite{Pr86}) and an $I$. Details of the constructions are in \cite{CCMP}.

\chapter{Processes}\label{chapter:proc}

\section{Choice and Process Morphisms}

The element missing from the machinery presented thus far for schedules is that of nondeterminism or {\em choice}. In particular, as we have presented it, a schedule carries the implicit assumption that all of the constituent events will be enacted. If one wishes to indicate that an event need {\em not} occur in certain circumstances, let alone the possibility that two events may mutually exclude each other, additional structure is required. It is this augmented schedule-with-choices notion that we shall refer to as a {\em process}.
Winskel \cite{NPW}, in his event structure model, provides for nondeterminism by producing a single schedule (i.~e., the partial order case thereof) and annotating it with a {\em conflict} relation. The interpretation placed on this is that only some conflict-free subset of the given events will occur. Thus, a process that enacts one of the events $a$ or $b$ but not both would be depicted as a schedule containing the events $a$ and $b$, but also containing the pair $(a,b)$ in its conflict relation. Note that in this case we still have to provide temporal information relating $a$ and $b$ even though conflict renders such information irrelevant. While the event structure model provides for a fairly simple structure, its conflation of causal and temporal order is troublesome when attempting to derive generalizations in which the temporal information is given by something other than a partial order. While we will not go so far as to claim that causality, unlike time, is fundamentally a binary relation, i.~e., either one event causes another or it does not, we do, however, suggest that extensions to this depiction of causality will not necessarily take the same form as the corresponding extension to the notion of time as a partial order.

The approach we shall take is the one adopted by Pratt \cite{Pr86} and Grabowski \cite{Gra} in the original pomset model, namely that which represents a choice by using distinct schedules. In this framework, a process is then a {\em set} of schedules. The interpretation here is that some schedule in the process' set will be followed, i.~e., there exists a schedule in the process' set for which all events will occur in accordance with its temporal constraints. For example, the above process that can perform one of $a$ or $b$ is now a set containing two schedules, each having but a single event. Additional structure is clearly necessary, but for now we consider the implications of this set-of-sets notion of process by itself. Henceforth, \%B shall refer to some category of schedules, labeled or unlabeled, as developed in the previous chapter (e.~g., {\%D\hPhys} for some suitable temporal domain \%D). \%B essentially serves as a parameter in the process construction.

\subsection{Processes as DNF Propositions}

So consider, for now, the individual schedules as bare sets, i.~e., ignoring any temporal information that may be present. We then have an outer set, which we shall call the {\em alternative} or {\em index} set, i.~e., the set of alternatives indexing the schedules from which the process has to choose. The first observation is that this form of process is essentially a disjunctive normal form (DNF) representation. That is, we have a set of disjuncts, namely the alternatives, each of which is itself a conjunction, i.~e., a schedule of events which {\em all} occur. Taken as a whole, the process, stripped of its temporal information, is simply a DNF proposition concerning which events will actually occur.

Since a major goal of this work is to extend the notion of schedule composition to process composition, we should, in the course of establishing our notion of process, also establish a corresponding notion of morphism from one process to another, the intention being that these morphisms play exactly the same role for processes as the above-described schedule morphisms do for schedules, e.~g., relating an observation to a process or embedding one process in another.
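To make the DNF reading concrete (writing each event name for the proposition that that event occurs), the process above that performs one of $a$ or $b$ corresponds, once stripped of temporal information, to the proposition $a\lor b$, while a process with two alternatives whose event sets are $\{a,b\}$ and $\{a,c\}$ corresponds to
$$(a\land b)\lor(a\land c).$$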
With our processes being DNF propositions, a natural choice of morphism to use to relate these propositions is that of {\em implication}. Consider the special case of conjunctions, where a suitable way to demonstrate that
$$\bigwedge_{i\in I} p_i \implies \bigwedge_{j\in J} q_j$$
is to exhibit a function $f:J\to I$ such that for all $j\in J$ we have $p_{f(j)}\implies q_j$. Such a function provides an elementary proof by demonstrating that every conjunct on the right is implied by something on the left. Moreover, if the implication holds, there will exist a proof of this form, though there is certainly no claim about it being unique. Dually, we may prove
$$\bigvee_{i\in I} p_i \implies \bigvee_{j\in J} q_j$$
via a function $f:I\to J$ such that for all $i\in I$ we have $p_i\implies q_{f(i)}$. Here, every disjunct on the left implies something on the right.

A category \%C determines a category of indexed families of objects of \%C via a standard construction.

\begin{definition}
For any category \%C, the category $\_\Fam(\%C)$ has objects of the form $\_C=(I,(C_i)_{i\in I})$ for some index set $I\in\_\Set$ and some collection of objects $C_i\in\ob(\%C)$, and morphisms $\_f:\_C^1\to\_C^2$ of the form $(f:I^1\to I^2,(f_i:C^1_i\to C^2_{f(i)})_{i\in I^1})$, where $f$ is a function and the $f_i$ are morphisms of \%C. Composition and identity are given by $\_g\_f=(gf,(g_{f(i)}f_i)_{i\in I^1})$ and $1_{\_C}=(1_I,(1_{C_i})_{i\in I})$ respectively.
\end{definition}

That is, we provide a function between the index sets, and for every element of the domain's index set we provide a morphism relating the corresponding objects of \%C. Thus, given a category \%A of atomic propositions and implications thereof, we can construct
\begin{itemize}
\item $\_\Fam(\%A)^{op}$, the category of conjunctive propositions on \%A (i.~e., conjunctions of atomic propositions). Notice that in the case of conjunctions, the direction of the implication runs opposite to that of the functions involved, thus the need for the ${-}^{op}$.
\item $\_\Fam(\_\Fam(\%A)^{op})$, the category of disjunctions of propositions of $\_\Fam(\%A)$, or, equivalently, the category of DNF propositions on \%A.
\end{itemize}

We now wish to generalize the two-step construction above by replacing $\_\Fam(\%A)$ by the schedule category \%B. According to this generalization, a schedule can be interpreted as a generalized conjunction of event-occurrence propositions. Therefore, it is not actually necessary to produce a category \%A of atomic event-occurrence propositions, since in the case of schedules, this provides no new information about the identities of events beyond what the schedule morphism itself already provides. If a schedule morphism maps one event to another, those events are identified and thus the occurrence of one implies the occurrence of the other vacuously. It is, however, important to note that schedule morphisms run in a direction inverse to that of the implication between the corresponding propositions. That is, if we have a schedule morphism $R\to S$, any occurrence of events satisfying $S$ necessarily satisfies $R$ as well. Thus, as propositions, $S\implies R$. The actual category of schedules-as-propositions is then $\%B^{op}$, which suggests that the corresponding category of processes, i.~e., DNF propositions instead of pure conjunctive propositions, is going to be $\_\Fam(\%B^{op})$.

\subsection{Adding Structure to the Alternative Set}

Of course, we should now consider what the actual structure on processes should be.
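Before doing so, it may help to unwind the unstructured notion on a small example (with the same informal event-name shorthand as above). Let $P$ be the process with a single alternative whose schedule consists of the events $a$ and $b$, and let $Q$ be the process with two one-event alternatives, one containing $a$ and the other $b$. As propositions, $a\land b\implies a\lor b$, and there is a corresponding morphism $P\to Q$ in $\_\Fam(\%B^{op})$: the index function sends the unique alternative of $P$ to the $a$-alternative of $Q$, and the accompanying component is, in \%B, the schedule morphism from the one-event schedule into the schedule containing both $a$ and $b$. Choosing the $b$-alternative instead yields a second, distinct morphism witnessing the same implication, in keeping with the remark above that such proofs need not be unique.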
While our main motive in using a DNF style of process is that of achieving a separation of causal and temporal information, the structureless set of schedules described thus far achieves, in some sense, much too complete a separation. \begin{itemize} \item Having an unstructured set of choices removes the possibility of describing {\em when} a particular choice takes place. Thus we forgo the generally recognized advantage of {\em branch\-ing-time} models. \item There is nothing to relate events on distinct schedules within a process. \end{itemize} To elaborate on the latter objection, if, e.~g., one wishes to specify that, once events $a$ and $b$ occur, $c$ {\em may} occur, one may consider a process of two schedules whose event sets are $\{a,b\}$ and $\{a,b,c\}$. However, unless we stipulate that all events used in schedules are drawn from some universal set of events, there is nothing to indicate that the two $a$'s are supposed to refer to the same event (``same'' in the sense that whether the event occurs has the same implications for observers of this process, regardless of which schedule it occurred in). The situation becomes even more obscure should we be using labeled schedules and thus might want to specify something like ``$c$ {\em may} occur once $a$ has occurred {\em twice},'' though this is perhaps more properly a hazard of using event labels to specify events. Thus the additional structure on processes should establish the required identities between events of the constituent schedules. We already have a mechanism to facilitate this, namely that of schedule morphisms. The alternative set will then be (at least) preordered, the idea being that if an event $c$ may optionally occur but only given that some other collection of events has occurred, we essentially have two schedules (alternatives), one containing only the predecessor events, the other containing $c$ as well; the latter alternative being later in the ordering on alternatives. This mapping of alternative orderings to schedule morphisms suggests viewing the alternative set itself as a category, thus allowing for the concise definition of a process as a functor into the schedule category \%B. \begin{definition} Given a category (of schedules) $\%B$, a {\em process} $P$ over $\%B$ is a category $A_P$ together with a functor $P:A_P\to\%B$. \end{definition} Having upgraded the alternative set from a mere set to a category, we should also present the corresponding upgrading of the $\_\Fam(\%B^{op})$ notion of process morphism. As might be expected, however, we have both the order structure on the alternative set and the various schedules' temporal constraints to contend with. In this case, the function between the alternative sets becomes a functor and the family of schedule morphisms becomes a natural transformation. \begin{definition} A {\em process morphism} $f:P\to Q$ is a functor $A_f:A_P\to A_Q$ together with a natural transformation $f:QA_f\nato P:A_P\to\%B$. We denote the category of processes over $\%B$ as \PROC{\%B}. \end{definition} As a matter of convention, we take the direction of the process morphism to be the same as that of the implication of the corresponding propositions, which also happens to match the direction of the underlying set function on the alternative sets. It is only in the unfortunate situation of schedules and schedule morphisms that the directions of the set functions and the implications happen to be different.
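To make the definition of process concrete (an illustrative sketch only, with temporal constraints suppressed and the names $p_0$, $p_1$ chosen here for exposition), the process just described, which may perform $c$ once $a$ and $b$ have occurred, can be presented with a two-object alternative category:
$$A_P=\{p_0\to p_1\},\qquad P(p_0)=\{a,b\},\qquad P(p_1)=\{a,b,c\},$$
with $P(p_0\to p_1)$ the evident schedule morphism $\{a,b\}\to\{a,b,c\}$; this morphism is precisely what records that the two $a$'s (and the two $b$'s) denote the same events.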
Given that we wish to adhere to previous conventions for schedule morphisms, the natural transformation must necessarily be in the opposite direction. The import of the naturality itself $$\commdiag {QA_f(p)&\rTo{f_p}{}&P(p)\cr \dTo{}{}&&\dTo{}{}\cr QA_f(p')&\rTo{f_{p'}}{}&P(p')\cr}$$ is simply that if satisfaction of $P$ implies satisfaction of $Q$, $P$ should ``agree with'' any identification of events made between schedules of $Q$. Otherwise an anomalous situation arises wherein for some pair of events of $P$, one has occurred, one has not, and $Q$ has both of these events identified. Despite the categorical machinery thus far invoked, we have not yet introduced any real structure: while this definition allows for the introduction of a nontrivial ordering on $A_P$ and indicates how, in such a case, these orderings should be reflected in the constituent schedules, it does not require the ordering to be anything other than discrete. Further stipulations will be necessary to obtain a more recognizable notion of process. To that end, we now consider the various subcategories of \PROC{\%B} in which our true interest lies. If we consider only processes $P$ for which $A_P$ is a discrete category, we are, as noted above, back to our previous definition, which corresponds to the usual notion of process as a set of schedules with no further relationships between the various alternative schedules. We will refer to these as discrete processes and the corresponding (full) subcategory as \PROCD{\%B}. While a general desire to separate causal from temporal information is implicit in our choice of a DNF form for process, one can only take this so far. In particular, it is still generally accepted that cause precedes effect. In fact, it is only the converse implication of temporal order implying causation that we wish to suppress. Thus, we would like a stipulation that requires the schedule corresponding to a later alternative in the alternative set ordering to contain all of the events of any predecessor schedules, and furthermore not to introduce any tighter temporal constraints between any pairs of such events. For this purpose we develop the notion of prefix. \subsubsection{Prefixes} As noted earlier, if the structure of the process does not relate two of its alternative schedules in any way, then the choice between them is essentially made at the beginning of time. If we could identify events on separate schedules within a process, this would provide a way of representing a {\em deferred} choice in the sense that an occurrence of an event or collection of events common to both schedules does not necessarily commit the process to be following one or the other of the schedules in question. That is, the choice made between the schedules is not seen to take place until either \begin{itemize} \item we have the occurrence of some event that is present in only one of the schedules, or \item some event common to both schedules occurs, but the timing of the occurrence violates the constraints given by one of the schedules. \end{itemize} However, given that the morphisms we use to identify events have an implicit directionality to them, we prefer to view the general situation of two schedules sharing some collection of events rather as that of two schedules sharing a common predecessor schedule. In other words, we have three schedules: one which the process follows ``before'' the choice is made, and two others which represent the possible outcomes of the choice.
This view decomposes all choices into predecessor-successor pairs, i.~e., all choices are now of the form, ``Either we stay with this schedule or we advance to the next one.'' What remains is to clarify the relationship between a schedule $\:p$ and any successor schedule $\:p'$. In the realm of partial orders, a prefix of an order $\:p'$ is a downward closed subset $\:p$, i.~e., such that for any elements $x,y\in \:p'$, if we have that $x\le y$ and $y\in \:p$ then $x$ must be in $\:p$ as well. This can equivalently be represented as a pullback $$\begin{diagram} \:p & \rInto & \:p'\\ \dTo^! && \dTo_{\chi}\\ \:1 & \rTo^0 & \:2\\ \end{diagram}$$ where \:2 is the usual two-element order $\{0,1|0\le1\}$. The pullback essentially makes \:p be the inverse image under $\chi$ of $0$, and it is the ordering on \:2 that forces the prefix property. One complication in translating this to the more general schedule framework is that the terminal object \:1 may not actually exist in \%B. For example, this is usually the case if we take $\%B=\%D\hPhys$. Likewise the self-constraints in \:2 will need to be terminal in order for these pullbacks to exist in general. With $\%D\hPhys$ we can, however, consider the corresponding diagram to be in $\%D\hCat$. $\:1$ and $\:2$ thus need only be objects of \%D\hCat. The general definition of prefix is then as follows: \begin{definition} \def\Bc{\overline{\%B}} Given a category of schedules \%B, a {\em prefix classifier} for \%B is a map $k:\:1\to\:2$ in a category $\Bc$ of schedules, such that \begin{itemize} \item $\Bc$ is complete and contains \%B as a full subcategory, \item the inclusion $i:\%B\to\Bc$ is a monoidal functor (i.~e., preserves $\otimes$ and \:I) and respects the forgetful functors into {\_\Set}, i.~e., $V_{\Bc}\circ i=V_{\%B}$, \item $\:1$ is a terminal object in $\Bc$, \item $\:2$ is a schedule of $\Bc$ having two events $v$ and $w$, with $\delta_{\:1}(u,u)=\delta_{\:2}(v,v)$ (where $u$ denotes the event of $\:1$), $\delta_{\:2}(w,v)$ initial, and $\delta_{\:2}(v,w)$ terminal. \end{itemize} \end{definition} \begin{definition} Given a prefix classifier $k$ for a category of schedules \%B, a morphism $\:p\to\:p'$ is a {\em prefix morphism} (or {\em prefix map}) iff there exists a morphism $\chi:\:p'\to\:2$ such that $$\begin{diagram} \:p & \rInto & \:p'\\ \dTo^! && \dTo_{\chi}\\ \:1 & \rTo^k & \:2\\ \end{diagram}$$ is a pullback. A {\em proper} prefix is one for which the inclusion $\:p\to\:p'$ is not an isomorphism. \end{definition} For preorders {\_2\hCat}, the only choice for \:2 is the usual ordinal, and $k:1\to 2$ is the inclusion of the lower element. This definition of prefix then yields the usual notion of prefix for a preorder ($x$ in the prefix and $y\le x$ imply $y$ in the prefix). \begin{theorem}\label{th:pos} Given a schedule category of the form $\%B=\%D\hPhys$ having a prefix classifier $k:\:1\to\:2$, and a $J$-diagram $(\:f_m:\:p_i\to \:p_j)_{(m:i\to j)\in J}$ with colimit $(\iota_i:\:p_i\to \:p)_{i\in J}$, and a morphism $\chi:\:p\to\:2$, or equivalently, a compatible collection of morphisms $\chi_i: \:p_i\to \:2$ (here, $\chi_i=\chi\iota_i$), we have that the diagram $(\:g_m:\:q_i\to \:q_j \mathrel{|} m:i\to j\;{\in J})$, in which $\:q_i$ is the $\chi_i$-prefix of $\:p_i$ and the $\:g_m$ are the canonical maps, has the colimit $(\:q,\kappa_i: \:q_i\to \:q)$, where $\:q$ is the $\chi$-prefix of $\:p$. \end{theorem} \noindent In other words, the colimit of a diagram of prefixes is a prefix of the colimit.
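As a concrete check of the pullback formulation in the preorder case just mentioned (an illustrative computation; the names $x$ and $y$ are chosen only for the example), take $\:p'$ to be the two-element order $\{x\le y\}$ and $\chi:\:p'\to\:2$ the monotone map with
$$\chi(x)=0,\qquad \chi(y)=1,\qquad \:p=\chi^{-1}(0)=\{x\}.$$
The pullback of $k:\:1\to\:2$ along $\chi$ is exactly this inverse image, and monotonicity of $\chi$ is what forces such an inverse image to be downward closed in general: whenever $x\le y$ and $\chi(y)=0$, monotonicity gives $\chi(x)\le\chi(y)=0$.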
Having fixed a particular prefix classifier in the schedule category $\%B$, we can then consider the following class of processes: \begin{definition} A process $P$ is {\em prefix ordered} iff \begin{itemize} \item $A_P$ is a preorder, \item For all $p,p'\in A_P$: $p\le p'$ implies $P_{pp'}:P(p)\to P(p')$ is a prefix morphism. \end{itemize} A process morphism $f:P\to Q$ is {\em prefix preserving} iff for each $p\le p'$ in $A_P$, the naturality diagram $$\commdiag {QA_f(p)&\rTo{f_p}{}&P(p)\cr \dIntoA{}{}&&\dIntoA{}{}\cr QA_f(p')&\rTo{f_{p'}}{}&P(p')\cr}$$ is a pullback in ${\%B}$. \end{definition} The significance of the pullback requirement for prefix-preserving morphisms is that it eliminates the possibility of an event appearing in an ``early'' alternative of $P$ without appearing in the corresponding ``early'' alternative of $Q$. That is, processes joined by a morphism should agree on the causal precedence of events that they have in common. Prefix ordered processes and prefix preserving morphisms form a category, which we will denote \PROCK{\%B}. \subsubsection{Branching Time} The next observation is that prefix ordered processes allow for the presence of ``useless'' alternatives. \begin{definition} Given a schedule category {\%B} having an initial object $0$ and satisfying the condition $$\forall s\in\ob(\%B):\quad\hbox{there exists some morphism $s\to 0$}\implies\hbox{$s$ is initial in \%B,}$$ a {\em branch\-ing-time} process over \%B is a prefix ordered process $P$ for which \begin{itemize} \item $P(p)=0$ implies $p$ initial in $A_P$, \item $p\le p'\in A_P$ implies that either $p=p'$ or the corresponding prefix map $P(p)\hookrightarrow P(p')$ is proper. \end{itemize} \PROCB{\%B} denotes the full subcategory of \PROCK{\%B} consisting of branch\-ing-time processes. \end{definition} The intuitive content here is that in any ordered sequence of alternatives, each successive schedule should ``make progress'' in the sense of each one having additional events not present in its predecessor. One might wish also to merge {\em every} pair of isomorphic schedules within a process, no matter how they are arrived at, thus requiring processes to be actual sets rather than the multisets thus far described. Unfortunately, for some choices of schedule category \%B, it is possible for a pair of schedules to be isomorphic without being {\em canonically} isomorphic. Without further stipulations on the structure of a process, the only case where such a canonical isomorphism is available is that of the initial schedule, $0$, so we can at least merge any initial alternatives that might exist (as we do above). The lack of canonical isomorphisms in general would defeat the construction of process limits, as we shall discuss when we present that construction. The condition imposed on \%B is true of all of the examples we have seen thus far (\%D\hCat, \%D\hPhys, $(\%D\hCat\comma\_\Set)$, $(\%D\hPhys\comma\_\Set)$, etc\dots). The conditions on a branch\-ing-time process $P$ together imply that \begin{itemize} \item there is at most one object in $A_P$ for which $P$ yields the schedule $0$; we will call it $\bot$ when it exists. \item $A_P$ is a partial order, for the second condition above enforces the antisymmetry axiom. \end{itemize} Finally, note that morphisms $f:P\to Q$ between such processes will automatically be strict, that is, $A_f(\bot_P)=\bot_Q$.
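As a small example of a branch\-ing-time process (illustrative only; temporal constraints are again suppressed and the names $p_0$, $p_1$, $p_2$ are chosen for the example), a deferred choice between $b$ and $c$ after an initial $a$ can be presented with a three-element partial order of alternatives, each ordering carried by $P$ to a proper prefix:
$$A_P=\{p_0\le p_1,\ p_0\le p_2\},\qquad P(p_0)=\{a\},\qquad P(p_1)=\{a,b\},\qquad P(p_2)=\{a,c\},$$
with $p_1$ and $p_2$ incomparable. No alternative is carried to the initial schedule $0$, so there is no $\bot$ here, and both conditions of the definition are satisfied.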
\subsubsection{Closure} For some applications, it will be necessary to be able to infer a particular infinite schedule from the corresponding sequence of finite schedules that have it as a limit. The stipulations for the various types of process presented thus far do not require such infinite schedules to ever exist. \begin{definition} Given a category \%B (of schedules) having all filtered colimits, a branch\-ing-time process $P$ over \%B is {\em closed} iff for any directed subset $S$ of $A_P$, there exists a least upper bound $\bigvee S$ in $A_P$ satisfying $P(\bigvee S)=\colim_{s\in S}P(s)$. A morphism between closed processes $f:P\to Q$ is {\em continuous} iff $A_f$ is continuous, i.~e., preserves joins of directed subsets. We denote the category of closed branch\-ing-time processes and continuous morphisms as \PROCBC{\%B}. \end{definition} Because of the naturality of $f:QA_f\nato P$, we have that for any directed set $S$ in $A_P$, the map $$f_{\bigvee S}:QA_f(\bigvee S)=\colim_{s\in S} QA_f(s) \to \colim_{s\in S}P(s)$$ is necessarily the usual canonical one. Here, the word ``closed'' is being used in its topological sense, that of a closed set containing its limit points, rather than the categorical sense, that of possessing a right adjoint to the tensor product. The latter sense would be meaningless here, given that we do not even view a process $P$ as a category, let alone have a definition of tensor product on $P$. \subsubsection{Labels} If we are considering processes over a category of labeled schedules (e.~g., $\%B=(\%D\hCat\comma\_\Set)$), we have to contend with the orthogonal issue of the alphabet from which the process draws its labels. One generally expects that all of the constituent schedules of a given process will be over the same alphabet. While this can be dealt with simply by taking \%B to be $\%B_{\^S}$, a category of labeled schedules over a single alphabet \^S, this does not allow for morphisms between processes over distinct alphabets, as will often arise in the course of composing processes, i.~e., one should not be compelled to specify a full system alphabet in advance. \begin{definition} Given a category of labeled schedules $\%B$, i.~e., with a projection $\sigma:\%B\to\Set$, a process over $\%B$ is {\em alphabetically coherent} if it is also a process over $\%B_{\Sigma}$ for some alphabet $\Sigma$. Given alphabetically coherent processes $P$ over $\%B_{\Sigma}$ and $Q$ over $\%B_{\Gamma}$, a process morphism (in \PROC{\%B}) $f:P\to Q$ is likewise {\em alphabetically coherent} if there exists a translation $t:\Gamma\to\Sigma$ such that, for all $p\in A_P$, $\sigma(f_p:QA_f(p)\to P(p))=t$. \CPROC{\%B} denotes the subcategory of \PROC{\%B} consisting of alphabetically coherent processes and process morphisms. \end{definition} As noted, this question of alphabetical coherence is orthogonal to the question of the particular alternative-set structure we wish to use. One may thus consider corresponding categories of alphabetically coherent processes, namely the categories of discrete, prefix ordered, branch\-ing-time, or closed branch\-ing-time such processes, respectively ${\cal P}^{\bullet}_d[\%B]$, ${\cal P}^{\bullet}_k[\%B]$, ${\cal P}^{\bullet}_{k\bot}[\%B]$, and \CPROCBC{\%B}. \section{Properties} \subsection{Pointwise Lifting of Schedule Functors} {\em Pointwise lifting} refers to the notion of taking a unary schedule operation and applying it uniformly to all of the constituent schedules of a given process.
We need to establish what we mean by ``uniformly'' and present sufficient conditions on the schedule operation so that such a lifting will be possible. Essentially, if we are given a functor $F:\%B\to\%B'$ between two categories of schedules, we can apply this functor to all of the constituent schedules of a given process over $\%B$ to yield a corresponding process over $\%B'$. \begin{definition} Given a functor $F:\%B\to\%B'$, the {\em lifted} functor $F_{\#}:\PROC{\%B}\to\PROC{\%B'}$ is defined as follows: For any process $P\in\PROC{\%B}$, $F_{\#}P$ is given by $A_{F_{\#}P}=A_P$ (i.~e., same index set) and $F_{\#}P(:A_P\to\%B')=FP$. For any morphism $f:P\to Q$, $F_{\#}f$ is given by $A_{F_{\#}f}=A_f$ (i.~e., same index function) and $(F_{\#}f)_p (:FQA_f(p)\to FP(p))=Ff_p$. \end{definition} \nopagebreak \noindent Functoriality of $F_{\#}$ follows almost immediately from functoriality of $F$. Similarly, natural transformations lift as well. \nopagebreak \begin{definition} Given a natural transformation $\^h:F\nato G:\%B\to\%B'$, the {\em lifted} natural transformation $\^h_{\#}:G_{\#}\nato F_{\#}:\PROC{\%B}\to\PROC{\%B'}$ is defined by specifying the morphisms $\^h_{\#P}:G_{\#}P\to F_{\#}P$ as follows \begin{itemize} \item $A_{\^h_{\#P}}=1_{A_P}$, recalling that $A_P=A_{G_{\#}P}=A_{F_{\#}P}$. \item $(\^h_{\#P})_p = \^h_{P(p)}$ \end{itemize} \end{definition} As one might expect, naturality of $\^h_{\#}$ follows almost immediately from naturality of $\^h$. Applying this lifting to the unit and counit transformations of an adjunction, we get \begin{proposition}\label{prop:liftadj} If $F:\%B\to\%B'$ is the left (right) adjoint of $G:\%B'\to\%B$, then $F_{\#}:\PROC{\%B}\to\PROC{\%B'}$ is the right (left) adjoint of $G_{\#}:\PROC{\%B'}\to\PROC{\%B}$. \end{proposition} \begin{proofo} If $F$ is the left adjoint of $G$, we have unit $\^h:1_{\%B}\nato GF$ and counit $\^e:FG\nato 1_{\%B'}$ natural transformations satisfying the usual triangular identities \begin{eqnarray*} \^e_{F\:X}\circ F\^h_{\:X}&=&1_{F\:X}\\ G\^e_{\:Y}\circ\^h_{G\:Y}&=&1_{G\:Y}\\ \end{eqnarray*} These transformations lift to $\^h_{\#}:(GF)_{\#}\nato 1_{\PROC{\%B}}$ and $\^e_{\#}:1_{\PROC{\%B'}}\nato (FG)_{\#}$. Noting that $(GF)_{\#}=G_{\#}F_{\#}$ and $(FG)_{\#}=F_{\#}G_{\#}$, the triangular identities can likewise be shown to lift to \begin{eqnarray*} F_{\#}\^h_{\#X}\circ\^e_{\#F_{\#}X}&=&1_{F_{\#}X}\\ \^h_{\#G_{\#}Y}\circ G_{\#}\^e_{\#Y}&=&1_{G_{\#}Y}\\ \end{eqnarray*} which suffices to demonstrate that $F_{\#}$ is a right adjoint of $G_{\#}$ with $\^e_{\#}$ and $\^h_{\#}$ being the unit and counit, respectively. \end{proofo} In particular, for the case of \%B being a category of labeled schedules and for a given alphabet translation $t:\Gamma\to\Sigma$, we recall the corresponding translation and restriction schedule functors $t_*:\%B_{\Gamma}\to\%B_{\Sigma}$ and $t^*:\%B_{\Sigma}\to\%B_{\Gamma}$. We can then lift these to process functors $t_{\#}:\PROC{\%B_{\Gamma}}\to\PROC{\%B_{\Sigma}}$ and $t^{\#}:\PROC{\%B_{\Sigma}}\to\PROC{\%B_{\Gamma}}$ in the above manner. As a matter of simplifying the notation, it is understood that $t_{\#}$ and $t^{\#}$ are abbreviations for $t_{*\#}$ and $t^*_{\#}$ respectively. Given the adjunction $t_*\dashv t^*$, we can then infer a corresponding adjunction $t^{\#}\dashv t_{\#}$ from Proposition \ref{prop:liftadj}.
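Unwinding this lifted adjunction as a hom-set bijection (a restatement only, with $P$ and $Q$ arbitrary processes over the respective alphabets), $t^{\#}\dashv t_{\#}$ amounts to a natural correspondence
$$\hbox{\rm Hom}_{\PROC{\%B_{\Gamma}}}(t^{\#}Q,\,P)\;\cong\;\hbox{\rm Hom}_{\PROC{\%B_{\Sigma}}}(Q,\,t_{\#}P)$$
for $P\in\PROC{\%B_{\Gamma}}$ and $Q\in\PROC{\%B_{\Sigma}}$, so that morphisms out of a restricted process correspond exactly to morphisms into the translated one.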
In the subcategory of prefix ordered processes, we need additional conditions on the supplied functor $F:\%B\to\%B'$ in order that the resulting lifted functor $F_{\#}:\PROCK{\%B}\to\PROCK{\%B'}$ indeed maps into \PROCK{\%B'}, i.~e., that the constructions for $F_{\#}P$ and $F_{\#}f$ produce a prefix ordered process and a prefix preserving morphism when given prefix ordered $P$ and prefix preserving $f$. The following set of conditions is sufficient to guarantee this: \begin{itemize} \item $F$ preserves pullbacks. \nobreak \item \nobreak If $k:1\to 2$ is the prefix classifier for \%B, then $Fk:F1\to F2$ is a prefix morphism in $\%B'$. \end{itemize} Likewise, a natural transformation $\^h:F\nato G:\%B\to\%B'$ of schedule functors will only lift if its naturality diagram $$\commdiag {Fs&\rTo{\^h_s}{}&Gs\cr \dIntoA{}{}&&\dIntoA{}{}\cr Fs'&\rTo{\^h_{s'}}{}&Gs'\cr}$$ is a pullback in the case where the $s\to s'$ morphism is a prefix. This is required in order for $\^h_{\#P}$ to actually be a prefix preserving morphism for any $P\in\PROCK{\%B}$. Sufficient conditions to guarantee this are the aforementioned conditions on $F$ together with the condition that $$\commdiag {F1&\rTo{\^h_1}{}&G1\cr \dIntoA{Fk}{}&&\dIntoA{}{Gk}\cr F2&\rTo{\^h_2}{}&G2\cr}$$ be a pullback. In the case of branch\-ing-time processes, though we don't need any additional conditions on $F$ beyond those for the prefix ordered case, there is more to do to lift a functor $F:\%B\to\%B'$ to a corresponding {\em induced} functor $F_{\#\bot}:\PROCB{\%B}\to\PROCB{\%B'}$. For any process $P\in\PROCB{\%B}$, we can construct an equivalence relation $R$ on $A_P$ generated by the set of all pairs $(p,p')$ in $A_P$ for which \begin{itemize} \item $p\le p'$ and $FP(p\to p')$ is an isomorphism, or \item $FP(p)$ and $FP(p')$ are both initial. \end{itemize} The lifted {\em branch\-ing-time} process, which we shall also denote $F_{\#}P$, is then a process whose alternatives are given by the quotient category $A_P/R$. We then take a representative $p$ of each $R$-equivalence class $[p]$ and have $F_{\#}P:A_P/R\to\%B'$ be the functor taking $[p]$ to $FP(p)$. This construction is not canonical, since for any particular equivalence class of alternatives, there may be a variety of choices for $p$, though the functoriality of $P:A_P\to\%B$ guarantees a canonical isomorphism between any pair of such choices. Thus, all of the possible $F_{\#}P$'s we can construct will themselves be canonically isomorphic. For any morphism $f:P\to Q$, showing that there exists a well-defined morphism $F_{\#}f:F_{\#}P\to F_{\#}Q$ is a matter of showing that $A_f$ maps $R$-equivalent alternatives of $A_P$ to equivalent alternatives of $A_Q$. We have already noted that $A_f$ is strict, which handles the case of alternatives equivalent by virtue of being initial. The case of noninitial $p\le p'$ with $FP(p\to p')$ an isomorphism is covered by $F$ preserving prefixes and prefix pullbacks, i.~e., the pullback in the naturality diagram of $f$ is preserved, thus guaranteeing that $FQ(A_fp\to A_fp')$ is also an isomorphism. Lifting a natural transformation $\^h:F\nato G$ is similarly complicated but likewise involves no additional stipulations. In the general prefix ordered case, we could rely on $A_P=A_{G_{\#}P}=A_{F_{\#}P}$, and thus have $A_{\^h_{\#P}}$ be the identity.
In the branch\-ing-time case, various alternatives of $A_{F_{\#}P}$ and $A_{G_{\#}P}$ will be identified, in which case we need to ensure that we can still define the map $A_{\^h_{\#P}}:A_{G_{\#}P}\to A_{F_{\#}P}$. The essential observation is that if two alternatives $p$ and $p'$ of $A_P$ are identified in $A_{G_{\#}P}$, this happens in one of the following two ways: \begin{itemize} \item $GPp$ and $GPp'$ are both initial, in which case $FPp$ and $FPp'$ have to be as well, \item $p\le p'$ and $GPp\hookrightarrow GPp'$ is an isomorphism, but then, since $$\commdiag {FPp&\rTo{\^h_{Pp}}{}&GPp\cr \dIntoA{}{}&&\dIntoA{}{}\cr FPp'&\rTo{\^h_{Pp'}}{}&GPp'\cr}$$ is a pullback, we have $FPp\hookrightarrow FPp'$ being an isomorphism as well. \end{itemize} In each case, it is seen that the offending alternatives must also be identified in $A_{F_\#P}$. Therefore the identity on $A_P$ factors through the quotient to yield the desired alternative-set map $A_{\^h_{\#P}}:(A_P/R_G=A_{G_{\#}P})\to (A_P/R_F=A_{F_{\#}P})$. In the case of closed branch\-ing-time processes, $F$ lifts as for branch\-ing-time processes. Hence, we need only that $F$ preserve filtered colimits to ensure that $F_{\#}P$ will be closed. \subsection{Process Limits and Coproducts} We now turn to the question of limits and colimits in the process category, starting with a result about the general process category \PROC{\%B}. As a preliminary observation, notice that there is a forgetful functor $A:\PROC{\%B}\to\_\Cat$ that discards the schedule information, i.~e., takes a process $P$ to its alternative set (category, actually) $A_P$ and a morphism $f:P\to Q$ likewise to its alternative set map $A_f:A_P\to A_Q$. \begin{lemma}\label{lem:aladj} If \%B has a terminal object, $A:\PROC{\%B}\to\_\Cat$ has a left adjoint. \end{lemma} \begin{proof} Define $L:\_\Cat\to\PROC{\%B}$ as follows: For any category $C$, the process $L_C$ is that whose alternative category $A_{L_C}$ is $C$ and whose schedules are all terminal, i.~e., $L_Cc=1_{\%B}$ for all $c\in\ob(C)$ with morphisms of $C$ going to the canonical terminal schedule maps. That this is a left adjoint is shown by exhibiting, for any $C\in\_\Cat$, a universal arrow $C\to A_{L_C}(=C)$ which we take to be the identity. For any process $P\in\PROC{\%B}$ and any functor $f:C\to A_P$, we take $g:L_C\to P$ to be such that $A_g=f$ and such that the associated schedule maps $g_c:Pfc\to L_Cc(=1)$ are the terminal maps. Clearly, $g$ is the only morphism for which $A_g\circ 1_C=f$. \end{proof} From this, we learn that the $A$ functor preserves limits, i.~e., the alternative category of a limit of processes will necessarily be the limit of the corresponding alternative categories in \_\Cat. \begin{theorem}\label{thm:plim} For any category $\%B$ having all colimits of type $J^{op}$, \PROC{\%B} has all limits of type $J$. \end{theorem} \begin{proof} \mathcode`\|="326A \mathcode`\;="303B \mathcode`\:="603A Consider a diagram of type $J$ in $\PROC{\%B}$ with processes $(P^i| i\in J)$ and morphisms $(f^m:P^i\to P^j|m:i\to j\in J)$. Just to provide names, we will denote the limit as $(P; \pi^i:P\to P^i | i \in J)$ (that is, $P$ is the object and the $\pi^i$'s are the projections).
\setlength{\unitlength}{0.0088in} \begin{picture}(263,186)(-115,635) \thicklines \put(285,780){\vector( 4, 1){0}} \multiput( 85,730)(13.33333,3.33333){15}{\makebox(0.6333,0.9500){\tenrm .}} \put(285,680){\vector( 4,-1){0}} \multiput( 85,730)(13.33333,-3.33333){15}{\makebox(0.6333,0.9500){\tenrm .}} \put(195,785){\vector( 2, 1){0}} \multiput( 85,730)(12.22222,6.11111){9}{\makebox(0.6333,0.9500){\tenrm .}} \put(195,675){\vector( 2,-1){0}} \multiput( 85,730)(12.22222,-6.11111){9}{\makebox(0.6333,0.9500){\tenrm .}} \put(235,790){\vector( 1, 0){ 70}} \put(235,670){\vector( 1, 0){ 70}} \put(225,775){\vector( 1,-1){ 80}} \put(195,745){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\pi^3$}}} \put(110,765){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\pi^1$}}} \put(115,685){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\pi^2$}}} \put(195,710){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\pi^4$}}} \put(260,645){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^{III}$}}} \put(275,735){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^{II}$}}} \put(265,800){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^{I}$}}} \put( 65,725){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P$}}} \put(205,785){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^1$}}} \put(205,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^2$}}} \put(315,785){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^3$}}} \put(320,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^4$}}} \end{picture} \noindent From the lemma, we know that the alternative category $A_P$ of the limit is obtained by taking the limit of the diagram $(A_{P^i};A_{f^m})$ in $\_\Cat$. This limit in {\_\Cat} also gives us a collection of projections which we can use for the alternative portion ($A_{\pi^i}:A_P\to A_{P^i}$) of the morphisms $\pi^i$. As a consequence of the construction of limits in \_\Cat, a typical object $p$ of $A_P$ will be a cone $(p_i:\{\bullet\}\to A_{P^i} | i\in J)$, or equivalently, a collection of objects $(p_i\in A_{P^i})_{i\in J}$ such that $p_j=A_{f^m}p_i$ for all $m:i\to j$ in $J$. 
\setlength{\unitlength}{0.0088in} \begin{picture}(366,250)(-100,621) \thicklines \put(320,775){\circle{50}} \put(325,660){\circle{58}} \put(185,670){\circle{50}} \put(210,810){\circle{58}} \put( 70,730){\circle{60}} \thinlines \put(321,789){\vector( 4, 1){0}} \multiput( 85,730)(13.33333,3.33333){18}{\makebox(0.6333,0.9500){\tenrm .}} \put(321,671){\vector( 4,-1){0}} \multiput( 85,730)(13.33333,-3.33333){18}{\makebox(0.6333,0.9500){\tenrm .}} \put(201,788){\vector( 2, 1){0}} \multiput( 85,730)(12.00000,6.00000){10}{\makebox(0.6333,0.9500){\tenrm .}} \put(201,672){\vector( 2,-1){0}} \multiput( 85,730)(12.00000,-6.00000){10}{\makebox(0.6333,0.9500){\tenrm .}} \put(210,790){\vector( 1, 0){110}} \put(205,670){\vector( 1, 0){115}} \put(210,790){\vector( 1,-1){115}} \put( 80,730){\circle*{5}} \put(205,790){\circle*{5}} \put(200,670){\circle*{5}} \put(320,790){\circle*{5}} \put(325,670){\circle*{5}} \put(260,800){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{f^I}$}}} \put(225,730){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{f^{II}}$}}} \put(245,650){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{f^{III}}$}}} \put(145,805){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^1}$}}} \put(125,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^2}$}}} \put(350,770){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^3}$}}} \put(355,650){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^4}$}}} \put( 15,725){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_P$}}} \put( 70,715){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p$}}} \put(205,800){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_1$}}} \put(175,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_2$}}} \put(315,770){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_3$}}} \put(325,655){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_4$}}} \end{picture} There will then be, for any such $p$, a corresponding collection of objects $P^ip_i$ in {\%B} and morphisms $f^m_{p_i}:P^jA_{f^m}p_i(=P^jp_j)\to P^ip_i$ for all $m:i\to j$ in $J$. This latter collection is, in fact, a diagram of type $J^{op}$ in $\%B$, which, by hypothesis, has a colimit $(Pp; \pi^i_p:P^ip_i\to Pp)$. This provides the object map for the functor $P$ as well as the definition for the natural transformation $\pi^i:P^iA_{\pi^i}\nato P$. 
\setlength{\unitlength}{0.0088in} \begin{picture}(366,450)(0,421) \thicklines \put(320,775){\circle{50}} \put(325,660){\circle{58}} \put(185,670){\circle{50}} \put(210,810){\circle{58}} \put( 70,730){\circle{60}} \thinlines \put(321,789){\vector( 4, 1){0}} \multiput( 85,730)(13.33333,3.33333){18}{\makebox(0.6333,0.9500){\tenrm .}} \put(321,671){\vector( 4,-1){0}} \multiput( 85,730)(13.33333,-3.33333){18}{\makebox(0.6333,0.9500){\tenrm .}} \put(201,788){\vector( 2, 1){0}} \multiput( 85,730)(12.00000,6.00000){10}{\makebox(0.6333,0.9500){\tenrm .}} \put(201,672){\vector( 2,-1){0}} \multiput( 85,730)(12.00000,-6.00000){10}{\makebox(0.6333,0.9500){\tenrm .}} \put(210,790){\vector( 1, 0){110}} \put(205,670){\vector( 1, 0){115}} \put(210,790){\vector( 1,-1){115}} \put( 80,730){\circle*{5}} \put(210,790){\circle*{5}} \put(205,670){\circle*{5}} \put(325,790){\circle*{5}} \put(325,670){\circle*{5}} \put(145,805){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^1}$}}} \put(125,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^2}$}}} \put(350,770){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^3}$}}} \put(355,650){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^4}$}}} \put( 15,725){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_P$}}} \put( 70,715){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p$}}} \put(205,800){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_1$}}} \put(175,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_2$}}} \put(315,770){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_3$}}} \put(325,655){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$p_4$}}} \thinlines \put(210,790){\vector( 1, -4){45}} \put(205,670){\vector( 1, -4){45}} \put(325,790){\vector( 1, -4){45}} \put(325,670){\vector( 1, -4){45}} \thicklines \put(167,538){\vector(-4,-1){0}} \multiput(175,540)(13.33333,3.33333){14}{\makebox(0.6333,0.9500){\tenrm .}} \put(167,522){\vector(-4, 1){0}} \multiput(175,520)(13.33333,-3.33333){14}{\makebox(0.6333,0.9500){\tenrm .}} \put(151,538){\vector(-2,-1){0}} \multiput(159,542)(12.00000,6.00000){8}{\makebox(0.6333,0.9500){\tenrm .}} \put(151,522){\vector(-2, 1){0}} \multiput(159,518)(12.00000,-6.00000){8}{\makebox(0.6333,0.9500){\tenrm .}} \put(360,590){\vector(-1, 0){70}} \put(360,470){\vector(-1, 0){75}} \put(365,485){\vector(-1, 1){90}} \put(310,602){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^I_{p_1}$}}} \put(275,530){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^{II}_{p_1}$}}} \put(305,450){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$f^{III}_{p_2}$}}} \put(120,525){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$Pp$}}} \put(245,585){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^1p_1$}}} \put(365,585){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^2p_2$}}} \put(245,465){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^3p_3$}}} \put(365,465){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^4p_4$}}} \put(180,565){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^p^1_p$}}} \put(180,495){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^p^3_p$}}} \end{picture} To get the action of $P$ on morphisms, recall that a morphism $s:p\to p'$ in $A_P$ will be a collection of maps $s_i:p_i\to p'_i$ in $A_{P^i}$ such that $s_j=A_{f^m}s_i$ for all $m:i\to j$ in $J$. As with objects, we can consider the associated collection of morphisms $P^is_i:P^ip_i\to P^ip'_i$ in $\%B$. 
These constitute a natural transformation between the $J^{op}$-diagrams associated with $p$ and $p'$, since $$\commdiag {P^jp_j=P^jA_{f^m}p_i&\rTo{f^m_{p_i}}{}&P^ip_i\cr \dTo{P^js_j}{=P^jA_{f^m}s_i}&&\dTo{}{P^is_i}\cr P^jp'_j=P^jA_{f^m}p'_i&\rTo{f^m_{p'_i}}{}&P^ip'_i\cr}$$ commutes by naturality of $f^m$. Given any such transformation between the diagrams, there is a corresponding unique (canonical) morphism between the colimits $Ps:Pp\to Pp'$ making everything commute. The reader may verify that $\pi^i:P^iA_{\pi^i}\nato P$ is a natural transformation. Given some other cone $(Q; \^t^i:Q\to P^i | i \in J)$, we now need to exhibit a unique morphism $\^x:Q\to P$ factoring this through the limit cone. We note that the limithood of $(A_P; A_{\pi^i}:A_P\to A_{P^i} | i \in J)$ in $\_\Cat$ provides us with a unique map $A_{\^x}:A_Q\to A_P$ in \_\Cat. Then, for every alternative $q\in A_Q$, there is a collection of alternatives $(q_i=A_{\^t^i}q\in A_{P^i})_{i\in J}$ and then a corresponding schedule diagram with objects $(P^iq_i)_{i\in J}$ and morphisms $f^m_{q_i}:P^jq_j\to P^iq_i$ for all $m:i\to j$ in $J$. \setlength{\unitlength}{0.0088in} \begin{picture}(423,641)(13,201) \thicklines \put( 45,775){\circle{64}} \put(240,530){\circle{72}} \put(125,590){\circle{60}} \put(265,670){\circle{58}} \put(380,520){\circle{58}} \put(375,635){\circle{50}} \put( 25,770){\circle*{5}} \put(145,590){\circle*{5}} \put(265,650){\circle*{5}} \put(265,530){\circle*{5}} \put(380,650){\circle*{5}} \put(380,530){\circle*{5}} \multiput( 25,770)(21.71648,-32.57473){6}{\line( 2,-3){ 11.418}} \put(145,590){\vector( 2,-3){0}} \multiput( 25,770)(11.25000,-7.50000){33}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 25,770)(12.85714,-4.28571){29}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 25,770)(9.60000,-9.60000){26}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 25,770)(12.00000,-6.00000){21}{\makebox(0.6333,0.9500){\tenrm .}} \put(145,590){\vector( 4, 1){240}} \put(145,590){\vector( 4,-1){240}} \put(145,590){\vector( 2,-1){120}} \put(145,590){\vector( 2, 1){120}} \put(265,650){\vector( 1,-1){120}} \put(265,530){\vector( 1, 0){115}} \put(265,650){\vector( 1, 0){115}} \put(255,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$q_1$}}} \put(245,515){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$q_2$}}} \put(370,630){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$q_3$}}} \put(375,512){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$q_4$}}} \put(210,715){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{\^t^3}$}}} \put( 60,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{\^x}$}}} \put(120,577){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{\^x}q$}}} \put( 30,780){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$q$}}} \put( 65,585){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_P$}}} \put(205,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^1}$}}} \put(175,520){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^2}$}}} \put(410,640){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^3}$}}} \put(415,520){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^4}$}}} \multiput( 36,453.5)(21.71648,-32.57473){5}{\line( 2,-3){ 11.418}} \multiput( 47.5,455)(11.25000,-7.50000){31}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 50.71428,461.42857)(12.85714,-4.28571){27}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 44.2,450.8)(9.60000,-9.60000){24}{\makebox(0.6333,0.9500){\tenrm .}} \multiput( 49,458)(12.00000,-6.00000){19}{\makebox(0.6333,0.9500){\tenrm .}} \put( 36,453.5){\vector(-2,3){0}} \put( 47.5,455){\vector(-3,2){0}} \put( 50.71428,461.42857){\vector(-3,1){0}}
\put( 44.2,450.8){\vector(-1,1){0}} \put( 49,458){\vector(-2,1){0}} \put(377,348){\vector(-4,-1){200}} \put(377,232){\vector(-4, 1){195}} \put(255,235){\vector(-2, 1){85}} \put(255,345){\vector(-2,-1){90}} \put(375,240){\vector(-1, 1){95}} \put(377,230){\vector(-1, 0){77}} \put(377,350){\vector(-1, 0){77}} \put( 20,465){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$Qq$}}} \put(130,285){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$PA_{\^x}q$}}} \put(260,345){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^1q_1$}}} \put(260,225){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^2q_2$}}} \put(380,345){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^3q_3$}}} \put(380,225){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^4q_4$}}} \put(210,415){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^t^3_q$}}} \put(185,335){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^p^1_{A_{\^x}q}$}}} \put(190,240){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^p^2_{A_{\^x}q}$}}} \put( 70,365){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^x_q$}}} \end{picture} This is a diagram we've already seen, namely the diagram associated with the single-point cone $A_{\^x}q$, whose colimit is $(PA_{\^x}q; \pi^i_{A_{\^x}q}:P^iq_i\to PA_{\^x}q)$. Now $Qq$ and the various $\^t^i_q$ provide us with a co-cone over this diagram, so there is a unique map $\^x_q:PA_{\^x}q\to Qq$ factoring this co-cone through the colimit. The provision of these $\^x_q$ for all alternatives $q\in A_Q$ completes the definition of $\^x:Q\to P$, and it should be clear that $\^x$ is unique. \end{proof} \vbox {Considering all possible diagrams $J$ collectively, we obtain \begin{corollary} $\%B$ cocomplete implies $\PROC{\%B}$ complete. \end{corollary}} Recall, however, that some of the schedule categories we use, $\%D\hPhys$ in particular, are not necessarily cocomplete. Fortunately, we can obtain a stronger result applicable to schedule categories that satisfy a weaker condition than cocompleteness. In the above proof, consider the case of a particular diagram of schedules for which there exists no co-cone at all. Suppose now that this very diagram arises in the course of a limit construction, say as the schedule diagram underlying some cone $p$ of $A_P$. Then, for any process cone $(Q; \^t^i:Q\to P^i | i \in J)$, it will not be possible for any alternative $q$ of $A_Q$ to be mapped by the resulting $A_{\^x}$ into the cone $p$, i.~e., to have $A_{\^t^i}q=p_i$ for all $i \in J$. If it were possible, then there would be a corresponding co-cone down in the schedule category $\^t^i_q:P^ip_i\to Qq$ even though none exists for that particular diagram. Thus, if the troublesome alternative $p$ were to be left out of $A_P$, it would still be the case that all cones $Q$ over the process diagram would factor through $P$. In the case of a category \%B which is {\em almost cocomplete}, we need only revise our limit construction as follows. Construct the limit category $A'$ over the diagram of $(A_{P^i};A_{f^m})$ in $\_\Cat$. Recall that the objects of $A'$ are single-point cones over $(A_{P^i};A_{f^m})$. Take $A_P$ to be that subcategory of $A'$ consisting only of those single-point cones $p=(p_i\in A_{P^i})_{i\in J}$ for which the \%B-diagram $(P^ip_i;f^m_{p_i}:P^jp_j\to P^ip_i)$ has a co-cone and hence, by almost cocompleteness, a colimit as well. Given this choice of $A_P$, for all $p\in A_P$ we take $Pp$ to be the colimit schedule as before.
Note that in this case $A_P$ is not necessarily the limit of the $A_{P^i}$ in {\_\Cat} but rather a subcategory thereof, contrary to what we inferred from Lemma \ref{lem:aladj}. This is not a contradiction; Lemma \ref{lem:aladj} requires \%B to have a terminal object, which is not the case for, e.~g., \%B=\%D\hPhys. Indeed if \%B were to have a terminal object, then almost cocompleteness would imply cocompleteness, since the terminal object provides {\em every} diagram with a cocone. \vbox {To summarize: \begin{theorem}\label{thm:plima} If $\%B$ is almost $J^{op}$-cocomplete, \PROC{\%B} is $J$-complete. \end{theorem}} Since {\%B} invariably has an initial object, \PROC{\%B} likewise has a terminal object $1$ having a single alternative $\bot$ mapping to the initial behavior. Essentially, this is the {\sc Skip} (or {\sc Idle}) process having a single behavior in which nothing happens. In the situation where \%B is a category of labeled schedules, if we wish to construct a limit of alphabetically coherent processes in \CPROC{\%B}, we can use the above construction provided the resulting limit process can be shown to be itself alphabetically coherent. \begin{corollary} If $\%B$ is almost $J^{op}$-cocomplete and $\sigma:\%B\to\Set$ preserves $J^{op}$-colimits, \CPROC{\%B} is $J$-complete. \end{corollary} \begin{proofo} Given a diagram of type $J$ in \PROC{\%B} all of whose objects and morphisms are coherent, the limit (together with all of the projections) will turn out to be coherent since every diagram of behaviors of which we have to take a colimit has the same underlying diagram of alphabets. The preservation of colimits by $\sigma$ means that the alphabet of the colimit and the various injections into it are the same in all cases. \end{proofo} It should be noted that, in the case of $\%B=\Pom$, $\cod:\Pom\to\Set$ does not preserve certain coequalizers \cite{CCMP} and thus fails the conditions of this corollary. One can indeed construct counterexamples in which a limit of pomset processes in $\PROC{\_\Pom}$ is not alphabetically coherent. These problems do not arise if we instead were to use $\_\Pom_{<}$, the category of pomsets with strict morphisms. Though $\_\Pom_{<}$, unlike \_\Pom, is not cocomplete, it is however {\em almost} cocomplete. Furthermore, $\^s:\Pom_{<}\to\Set$ preserves colimits in those cases where they actually exist, thus making \CPROC{\_\Pom_{<}} complete. \begin{corollary} If $\%B$ is almost $J^{op}$-cocomplete, \nobreak \begin{enumerate} \item\label{it:d} $\PROCD{\%B}$ is $J$-complete. \item\label{it:k} $\PROCK{\%B}$ is $J$-complete. \item\label{it:b} $\PROCB{\%B}$ is $J$-complete. \item\label{it:bc} $\PROCBC{\%B}$ is $J$-complete. \end{enumerate} \end{corollary} \begin{proof} (\ref{it:d}) follows from the fact that a limit over a diagram of discrete categories is necessarily discrete, since the existence of a nontrivial morphism between single-point cones implies a nontrivial morphism somewhere in the diagram. For (\ref{it:k}), this follows immediately from the fact that a colimit of prefixes is a prefix of the colimit (Theorem \ref{th:pos}). For (\ref{it:b}), it suffices to demonstrate that the two conditions on branch\-ing-time processes hold for the limit process, provided they hold for all of the processes in the diagram. For this we use the notation from the proof of Theorem \ref{thm:plim}. Since $s\to 0$ in {\%B} implies $s$ initial, a noninitial object $P^ip_i$ in a colimit diagram guarantees the colimit schedule $Pp$ to be noninitial via the injection $\^p^i_p$.
Thus there can be at most one initial alternative in the limit process $P$ by virtue of there being at most one initial alternative in each of the processes $P^i$. To establish the condition that $P$ takes nontrivial alternative orderings to proper prefixes, we note that having the projection $\^p^i:P\to P^i$ be a prefix preserving morphism means that \begin{diagram} P^ip_i&\rTo{\^p^i_p}{}&Pp\\ \dIntoA{}{}&&\dIntoA{}{}\\ P^ip'_i&\rTo{\^p^i_{p'}}{}&Pp'\\ \end{diagram} is a pullback, and thus if $P^ip_i\hookrightarrow P^ip'_i$ is a proper prefix for some $i\in J$, then so must $Pp\hookrightarrow Pp'$ be. For (\ref{it:bc}), it suffices to recall that colimits commute with each other (see Mac~Lane \cite{Mac}, ch.~9). \end{proof} \noindent Similar results for the coherent process categories follow in much the same way. It will also be of interest to know when lifted schedule functors preserve process limits, so we should also mention \begin{corollary} Given any functor $F:\%B\to\%B'$ preserving colimits of type $J^{op}$, $F_{\#}:\PROC{\%B}\to\PROC{\%B'}$ preserves limits of type $J$. \end{corollary} \noindent This follows immediately from the construction of process limits. The corresponding results for the special cases also hold. \begin{corollary} Given any functor $F:\%B\to\%B'$ preserving prefixes and colimits of type $J^{op}$, \begin{itemize} \item $F_{\#}:\PROCK{\%B}\to\PROCK{\%B'}$ preserves limits of type $J$. \item $F_{\#}:\PROCB{\%B}\to\PROCB{\%B'}$ preserves limits of type $J$. \item if $F$ preserves all filtered colimits, $F_{\#}:\PROCBC{\%B}\to\PROCBC{\%B'}$ preserves limits of type $J$. \end{itemize} \end{corollary} We now consider the question of colimits in the process categories. In the case of coproducts, at least, the results are even simpler than for limits. \begin{theorem} For {\em any} category $\%B$, \PROC{\%B} has an initial object and all (small) coproducts. \end{theorem} \begin{proof} Given a $J$-indexed collection of processes $\{P^i\in\PROC{\%B}:i\in J\}$, by the cocompleteness of $\_\Cat$, we can construct $A_P=\coprod_{i\in J} A_{P^i}$ with injections $A_{\^i^i}:A_{P^i}\to A_P$. Since this is a coproduct in $\_\Cat$, there exists a unique $P:A_P\to \%B$ for which $PA_{\^i^i}=P^i$, which we take to be the coproduct process. \begin{center} \setlength{\unitlength}{0.0088in} \begin{picture}(346,215)(-130,635) \thicklines \put(165,755){\circle{64}} \put( 55,700){\circle{50}} \put( 55,810){\circle{40}} \multiput(165,725)(0.00000,-11.11111){5}{\line( 0,-1){ 5.556}} \put(165,675){\vector( 0,-1){0}} \put( 75,690){\vector( 2,-1){ 60}} \put( 65,795){\vector( 2,-3){ 80}} \put( 75,710){\vector( 2, 1){ 60}} \put( 75,800){\vector( 2,-1){ 60}} \put( 60,755){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^1$}}} \put( 95,650){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P^2$}}} \put( 95,800){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{\^i^1}$}}} \put( 90,700){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{\^i^2}$}}} \put(170,700){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{!=$P$}}} \put(205,760){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_P$}}} \put( 0,785){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^1}$}}} \put( 0,720){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$A_{P^2}$}}} \put(160,645){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\%B$}}} \end{picture} \end{center} \noindent We then construct the process injections $\^i^i:P^i\to P$ using the alternative maps $A_{\^i^i}$ together with the identity natural transformation $PA_{\^i^i}=P^i$.
That $(P,\^i^i)_{i\in J}$ has the required universal property is straightforward. \end{proof} Since $A_P$ introduces no morphisms between alternatives in $A_{P^i}$ and $A_{P^j}$ for $i\ne j$, each $P^i$ being prefix ordered implies that $P$ is prefix ordered. The morphisms $\^i^i$ are all prefix preserving in a trivial way. In the case of branch\-ing-time processes, we have to modify the construction in the case where more than one $A_{P^i}$ contains an initial element $\bot$; these become identified in $A_P$. Since there is already a stipulation about the schedules that may be associated with $\bot$ by $P^i$ if $P^i\in\PROCB{\%B}$, i.~e., only the initial schedule, the functor $P$ will continue to be well-defined and serve as a colimit in \PROCB{\%B}. For \PROCBC{\%B} we need to observe that a directed subset of $A_P$ must lie entirely within the image of one of the $A_{\^i^i}$. Given this, the closure of $P$ follows from the closure of the $P^i$. These observations suffice to show \begin{corollary} \PROCK{\%B}, \PROCB{\%B} and \PROCBC{\%B} have an initial object and all small coproducts. \end{corollary} Notice that in order for coproducts to exist, there are no requirements on $\%B$ aside from those we have already made for the sake of being able to construct \PROCK{\%B}, \PROCB{\%B} and \PROCBC{\%B} at all. The existence of coproducts relies, in some sense, strictly on the properties of \_\Cat. The initial process $0$ is that for which the alternative set is empty. Essentially, this is the deadlocked {\sc Stop} (or {\sc Halt}) process that cannot do anything at all and thus is quite distinct from the {\sc Skip} process which is at least {\em able to do nothing}, i.~e., in the sense of being able to exhibit an idle behavior. Coequalizers in the process categories, where they exist at all, do not, as far as we know, have any particularly pleasant characterization. It is fortunate that we have no particular use for them. \section{Process Operations} There is now sufficient machinery to describe some basic operations on processes analogous to those given for schedules. We have essentially three tools for defining operations on processes. \begin{itemize} \item Categorical limits and colimits, which we shall use to provide definitions for the concurrence and choice operations on processes. \item Pointwise lifting of schedule functors, which we have already seen for describing the lifting of alphabet renaming and restriction functors in the case of processes over labeled schedules. Pointwise lifting will also be used to describe process orthocurrence. \item Iteration, i.~e., fixpoint constructions. \end{itemize} \subsection{Choice} The union or {\em choice} operation is the one operation that is entirely new with the introduction of processes. Intuitively, the process $P+Q$ is that which may either behave like $P$ or like $Q$. We see almost immediately that the coproduct in the process category essentially fills this role by taking the disjoint union of all of the alternative sets of the processes concerned without introducing any additional structure between the component processes, except in the case of branch\-ing-time processes (\PROCB{\%B} or \PROCBC{\%B}), where we may need to merge initial alternatives. Clearly the {\sc Stop} or $0$ process is an identity for the choice operation.
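For a concrete illustration (a discrete example with schedules written only by their event sets; the names are chosen for the example), the choice between a process offering one of $a$ or $b$ and a process offering $c$ simply pools the alternatives:
$$\{\{a\},\{b\}\}+\{\{c\}\}=\{\{a\},\{b\},\{c\}\},$$
with no orderings and no event identifications introduced between alternatives drawn from different summands.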
\subsection{Concurrence} We recall for a moment our interpretation in which a process is a specification of event occurrences that may be satisfied by a given observation, and a process morphism is a proof of implication between such specifications. Then, given a diagram of processes, an observation --- which itself can be viewed as a particularly tight specification --- that satisfies all of the specifications of the diagram in a consistent manner will essentially form a cone over that diagram. Having a single specification or process to represent the entire diagram is just the notion of having a limit over that diagram. Alternatively, the limit could be viewed as a most general observation that captures everything that is known in the diagram. We saw, in the context of schedules, that we are actually taking colimits, since the schedule morphisms run opposite to the direction of the implications they represent. {\em Process} morphisms, however, run parallel to the corresponding implications, so the corresponding notion of system composition is given by a {\em limit} rather than by a colimit. {\em Concurrence} is the special case of system composition in which we compose a number of entirely independent processes, i.~e., the diagram of processes being composed is discrete. The concurrence of processes $P^1$, $P^2$, \dots\ will then be their product $P^1\times P^2\times\dots$, this being analogous to the situation of schedule concurrence as a coproduct. Applying the limit construction to a discrete diagram of two processes $P^1$ and $P^2$, we first see that the alternative set $A_{P^1\times P^2}$ is $A_{P^1}\times A_{P^2}$. This is intuitively sensible in that choosing an alternative of the combined process is merely a matter of independently choosing an alternative from each of the component processes. Given such a choice $p_1\in A_{P^1}$, $p_2\in A_{P^2}$, the corresponding schedule will be the schedule coproduct $P^1p_1\|P^2p_2$; i.~e., once we have picked a pair of alternatives, we form the concurrence of the schedules in the previous manner, so that every event of each schedule appears and no temporal constraints are introduced between events on distinct schedules. As noted before, we will, as a matter of clarity, denote the schedule concurrence using the previous notation $\|$ rather than $+$ for coproduct, in order that it not be confused with the process choice ($+$) operation. \subsection{Orthocurrence} The process orthocurrence $P\otimes Q$ can be obtained via a pointwise lifting of schedule orthocurrences. That is, we take each possible choice of an alternative of $P$ and an alternative of $Q$ and construct the corresponding schedule orthocurrence. However, this involves lifting a {\em binary} schedule operation, and we have only thus far discussed how one lifts schedule operations defined as {\em unary} functors. If we can find some way to view schedule pairs as being schedules in their own right, then the desired construction follows automatically. First of all, given a pair of schedule categories \%B and $\%B'$, we have a canonical injection functor $\<{-},{-}>:\PROC{\%B}\times\PROC{\%B'}\to\PROC{\%B\times\%B'}$. Given a pair of processes $P\in \PROC{\%B}$, $Q\in \PROC{\%B'}$, each of which we recall is actually a functor, we can pair these to obtain a functor, i.~e., a process $A_P\times A_Q\to\%B\times\%B'$ in $\PROC{\%B\times\%B'}$. Process morphisms pair in a similar manner.
For pairings of prefix ordered processes, we need to consider the prefix classifiers $k$ and $k'$ for \%B and \%B', respectively. In this case, we can use $\<k,k'>$ as a prefix classifier for $\%B\times\%B'$, i.~e., a pair of maps is defined to be a prefix map in $\%B\times\%B'$ iff both components are prefixes. Then the injection constructed above will take pairs of prefix ordered processes to prefix ordered processes, and similarly for morphisms. Thus we also have a canonical injection $\<{-},{-}>:\PROCK{\%B}\times\%P_{k'}[\%B']\to\%P_{\<k,k'>}[\%B\times\%B']$. If $P$ and $Q$ are branch\-ing-time processes, then $\<P,Q>$ necessarily has only one initial alternative, seeing as the initial schedule of $\%B\times\%B'$ is $\<0_{\%B},0_{\%B'}>$. Likewise a paired prefix will be proper iff at least one of its components is proper. These observations allow us to conclude that $\<P,Q>$ will be a branch\-ing-time process as well. If $P$ and $Q$ are both closed branch\-ing-time processes, and \%B, $\%B'$ have all filtered colimits, then filtered colimits in $\%B\times\%B'$ will simply be pairs of colimits, $\<P,Q>$ is likewise a closed process, and again, a similar conclusion can be drawn for process morphisms. Now we can recall the schedule orthocurrence functor $\otimes:\%B\times\%B\to\%B$, and note that it satisfies the necessary properties for it to lift to a process functor ${\otimes}_{\#}:\PROC{\%B\times\%B}\to\PROC{\%B}$. \begin{definition} The {\em process orthocurrence} bifunctor $\otimes$ is the composition: $$\commdiag{\PROC{\%B}\times\PROC{\%B}&\rTo{\<{-},{-}>}{}&\PROC{\%B\times\%B}&\rTo{{\otimes}_{\#}}{}&\PROC{\%B}\cr}$$ \end{definition} It should be noted that the process orthocurrence bifunctor itself provides $\PROC{\%B}$ with a monoidal structure, one for which the corresponding identity is the process $I$ consisting of a single alternative whose schedule is $\:I$, the orthocurrence identity in \%B. A more descriptive name for $I$ might be {\sc Bang} or {\sc Tick} since the single underlying schedule is effectively one which has a single indeterminate event. \subsection{Concatenation} For concatenation, recall the universal construction we used for schedules. The basic definition for concatenation on processes is essentially the same, albeit with reversed arrows and some careful consideration as to what the concatenation specifier should be. In the following, $I_{\bot}$ is a two-alternative process in which the alternatives are ordered. The earlier one, $\bot$, is assigned the initial schedule ($I_{\bot}(\bot)=\:0_{\%B}$) while the other alternative, $\top$, points to the schedule $\:I_{\%B}$. In \PROCB{\%B} and \PROCBC{\%B}, we have $I_{\bot}=I+0$, but in the more general process categories, $I_{\bot}$ needs to be constructed explicitly to insert the $\bot<\top$ ordering in $A_{I_{\bot}}$. A morphism $e:P\to I_{\bot}$ can be seen as picking out an ``event'' of $P$. In slightly more detail, each alternative $p$ of $A_P$ is taken by $A_e$ either to $\bot$ or to $\top$. In the latter case we then have to choose a schedule map $I_{\bot}(\top)(=\:I)\to Pp$, i.~e., an actual event of $Pp$. Moreover, these choices must be such that if we choose an event for a given alternative, we must choose the corresponding event from each later alternative. In the case of prefix-preserving maps, we must choose the corresponding event from each earlier alternative as well. In short, $e:P\to I_{\bot}$ picks out an event as it threads its way through the various alternatives of the process.
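As a small illustration of such a morphism (an informal sketch using the deferred-choice style of example from earlier; the particular alternatives and events are assumed for the example), let $P$ have alternatives $p_0\le p_1$ with $P(p_0)$ a schedule on $\{a\}$ and $P(p_1)$ a schedule on $\{a,b\}$. One morphism $e:P\to I_{\bot}$ has
$$A_e(p_0)=A_e(p_1)=\top,\qquad e_{p_0}:\:I\to P(p_0)\ \hbox{and}\ e_{p_1}:\:I\to P(p_1)\ \hbox{both selecting}\ a,$$
where naturality is precisely what forces the two selections to agree across the prefix $P(p_0)\hookrightarrow P(p_1)$. Another sends $p_0$ to $\bot$ and $p_1$ to $\top$, selecting $b$, an event present only in the later alternative.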
\begin{definition} Given processes $P$, $Q$, and a map $d:2\to I_{\bot}\times I_{\bot}$ (a {\em concatenation specification}), a {\em pre-concatenation} $\<R,c_R,\^r_R>$ consists of \begin{itemize} \item a process $R$, together with \item a process map $c_R:R\to P\times Q$, and \item for every pair of maps $(e:P\to I_{\bot},e':Q\to I_{\bot})$, a process map $\^r_R(e,e'):R\to 2$ such that the following diagram commutes: $$\commdiag{R&\rTo{\^r_R(e,e')}{}&2\cr \dTo{c_R}{}&&\dTo{}{d}\cr P\times Q&\rTo{e\times e'}{}&I_{\bot}\times I_{\bot}\cr}$$ \end{itemize} The {\em concatenation} $P\cat Q$ is a preconcatenation $\<P\cat Q,c,e\cat e'>$ such that any preconcatenation $R$ factors through it, i.~e., there is a unique map $R\to P\cat Q$ making \begin{equation}\label{diag:preconcat} \begin{diagram} R&\SSE_{c_R} \SE^{\lower2pt\hbox{!}}\ESE^{\^r_R(e,e')} \\ && P\cat Q & \rTo_{e\cat e'} & 2 \\ && \dTo_c&&\dTo_d \\ && P\times Q & \rTo^{e\times e'} & I_{\bot}\times I_{\bot}\\ \end{diagram} \end{equation} commute. \end{definition} This definition has essentially the same content as in the schedule case: for each possible choice of an event from $P$ and an event from $Q$ we introduce an additional constraint as specified by $d$. The only complication is that the notion of picking an event from a process must allow for there possibly being more than one schedule containing the event in question. As with schedule concatenation, having every preconcatenation factor through the concatenation ensures that the resulting structure is minimal in the sense of not introducing any unnecessary constraints or superfluous events. Again, as with schedule concatenation, we could express process concatenation in an equivalent manner as a single pullback \begin{equation}\label{diag:concatpb} \begin{diagram} P\cat Q&\rTo&2^{Hom(P+Q,I_{\bot})}\\ \dTo^c&&\dTo_{d^{Hom(P+Q,I_{\bot})}}\\ P\times Q&\rTo&(I_{\bot}\times I_{\bot})^{Hom(P+Q,I_{\bot})}\\ \end{diagram} \end{equation} in which the bottom arrow is the product over all possible choices of pairs of maps $(e:P\to I_{\bot},e':Q\to I_{\bot})$. Though it is not clear this would be any more illuminating than the original definition, it does provide, via Theorem \ref{thm:plim} and its corollaries, an assurance that the concatenation as defined always exists, assuming \%B to be almost cocomplete. This construction, however, will only yield a recognizable notion of concatenation if we make a sensible choice for $d:2\to I_{\bot}\times I_{\bot}$, much as the definition for schedule concatenation rests on the choice of the schedule concatenation specifier $\:d:\:I\|\:I\to \:2$. Thus we should devote some attention to the question of which map $d$ we should actually use. $I_{\bot}\times I_{\bot}$ is a process with four alternatives, which we may denote, for lack of better names, $\:\OO,\:\OI,\:\IO,\:\II$.
Carrying through the product construction, the ordering on $A_{I_{\bot}\times I_{\bot}}$ is the usual product ordering and the corresponding schedule diagram is \nopagebreak \begin{center}\setlength{\unitlength}{0.0088in} \begin{picture}(0,175)(140,680) \thicklines \put( 80,795){\oval(60,30)} \put(200,795){\oval(60,30)} \put( 80,730){\oval(60,30)} \put(200,730){\oval(60,30)} \put(110,730){\vector( 1, 0){ 60}} \put( 80,780){\vector( 0,-1){ 35}} \put(110,795){\vector( 1, 0){ 60}} \put(200,780){\vector( 0,-1){ 35}} \put( 80,735){\circle*{5}} \put(190,735){\circle*{5}} \put(210,795){\circle*{5}} \put(210,725){\circle*{5}} \thinlines \put(210,795){\vector( 0,-1){ 65}} \put( 80,735){\vector( 1, 0){105}} \put(50,757){\makebox(0,0)[rb]{\raisebox{0pt}[0pt][0pt]{$I_{\bot}\times I_{\bot}$:}}} \put(80,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0_{\%B}\|\:0_{\%B}\cong \:0_{\%B}$}}} \put(80,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\|\:0_{\%B}\cong \:I_{\%B}$}}} \put(200,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0\|\:I_{\%B}\cong \:I_{\%B}$}}} \put(200,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\| \:I_{\%B}$}}} \end{picture} \end{center} in which all of the maps are the canonical ones. If $\%B=\%D\hPhys$, then the \:I schedules really are singleton event schedules and the underlying event maps will be as shown. At first we shall take $2$ to be just like $I_{\bot}\times I_{\bot}$ except that in the alternative \:\II, in which ``both'' events are present we introduce an extra temporal constraint between them. Otherwise the two processes are identical; in particular, we take $A_d$ to be the identity map. As for ``introducing the extra temporal constraint'', this is done by taking the schedule $2(\:\II)$ to be the schedule concatenation $\:I_{\%B}\cat\:I_{\%B}$ rather than the concurrence as above. The underlying schedules and schedule morphisms of $2$ are thus \begin{equation}\label{eq:2scheds} \setlength{\unitlength}{0.0088in} \begin{picture}(0,175)(140,680) \thicklines \put( 80,795){\oval(60,30)} \put(200,795){\oval(60,30)} \put( 80,730){\oval(60,30)} \put(200,730){\oval(60,30)} \put(110,730){\vector( 1, 0){ 60}} \put( 80,780){\vector( 0,-1){ 35}} \put(110,795){\vector( 1, 0){ 60}} \put(200,780){\vector( 0,-1){ 35}} \put(190,735){\circle*{5}} \put( 80,735){\circle*{5}} \put(210,795){\circle*{5}} \put(210,725){\circle*{5}} \thinlines \put(210,795){\vector( 0,-1){ 65}} \put( 80,735){\vector( 1, 0){105}} \multiput(190,735)(4,-2){6}{\makebox(0.6333,0.9500){\sevrm .}} \put( 50,757){\makebox(0,0)[rb]{\raisebox{0pt}[0pt][0pt]{$2$:}}} \put( 80,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0_{\%B}\|\:0_{\%B}\cong \:0_{\%B}$}}} \put( 80,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\|\:0_{\%B}\cong \:I_{\%B}$}}} \put(200,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0\|\:I_{\%B}\cong \:I_{\%B}$}}} \put(200,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\cat \:I_{\%B}$}}} \end{picture} \end{equation} In this diagram, the maps on the bottom and the right are those of the previous diagram composed with the canonical map $\:I_{\%B}\|\:I_{\%B}\to\:I_{\%B}\cat\:I_{\%B}$ for schedule concatenation. We also use the latter for $d_{\:\II}$, the one nontrivial portion of the natural transformation underlying the {\em process} morphism $d:2\to I_{\bot}\times I_{\bot}$. \begin{proposition}\label{prop:concatstr} $A_c:A_{P\times Q}\to A_{P\cat Q}$ is an isomorphism. 
For any pair of alternatives $\<p,q>\in A_{P\cat Q}$, the corresponding schedule will be $Pp\cat Qq$. Likewise the schedule portions of the maps $c:P\cat Q\to P\times Q$ and $e\cat e':P\cat Q\to 2$ are obtained from the canonical diagram for schedule concatenation \begin{equation}\label{eq:pconcat} \begin{diagram} \:I\|\:I & \rTo^{e_p\|e'_q}& Pp\|Qq\\ \dTo^{\:d} && \dTo_{c_{\<p,q>}}\\ \:2 & \rTo^{{e\cat e'}_{\<p,q>}} & Pp\cat Qq\\ \end{diagram} \end{equation} \end{proposition} \begin{proofo} That $A_c$ is an isomorphism follows immediately from consideration of the pullback construction of concatenation (\ref{diag:concatpb}). There, we notice that the two alternative sets on the right are isomorphic. Since the alternative sets themselves form a corresponding pullback by the construction of Theorem \ref{thm:plim}, the two alternative sets on the left must be isomorphic as well. $P\cat Q$ is indeed a preconcatenation, since assembling all of the schedule maps $c_{\<p,q>}$ and ${e\cat e'}_{\<p,q>}$ extracted from the diagrams \ref{eq:pconcat} does produce actual process morphisms $c$ and $e\cat e'$. It then remains to be shown that any other preconcatenation $R$ factors through it (as in \ref{diag:preconcat}). Having $A_c:A_{P\cat Q}\to A_{P\times Q}$ be an isomorphism makes determining the map $A_R\to A_{P\cat Q}$ a trivial matter. We are left with the task of producing the underlying schedule maps $Pp\cat Qq\to Rr$ for each alternative $r$ of $R$ (where, in each case, $\<p,q>=A_{c_R}r$). This is accomplished by showing that $Rr$ is a {\em schedule} preconcatenation and hence the desired schedule maps $Pp\cat Qq\to Rr$ exist and are unique by virtue of $Pp\cat Qq$ being a schedule concatenation. Showing that $Rr$ is a {\em schedule} preconcatenation entails showing that for any pair of schedule maps $(\:e:\:I\to Pp,\:e':\:I\to Qq)$ we can find a map $\:e\cat\:e':\:2\to Rr$ making $$\begin{diagram} \:I\|\:I & \rTo^{\:e\|\:e'}& Pp\|Qq\\ \dTo^{\:d} && \dTo_{{c_R}_r}\\ \:2 & \rTo^{\:e\cat\:e'} & Rr\\ \end{diagram}$$ commute. \begin{lemma} Given a process $P$, some alternative $p$ thereof, and any schedule map $\:e:\:I\to Pp$, there exists a process map $e:P\to I_{\bot}$ such that $e_p=\:e$. \end{lemma} \begin{proofo} Take $A_e(p')=\top$ if $p\le p'$, $\bot$ otherwise. Likewise, take $e_{p'}=P(p\le p')\circ\:e$ if $p\le p'$ and the canonical initial map otherwise. \end{proofo} \noindent{\it Outline of Proof of Proposition \ref{prop:concatstr} (conclusion):\quad} Given schedule maps $\:e$ and $\:e'$, the lemma provides corresponding process maps $e$ and $e'$, whereupon we can take advantage of $R$ being a process preconcatenation. This produces a process map $\^r(e,e'):R\to 2$, and one then has to check that the schedule map $\^r(e,e')_r:\:2\to Rr$ is exactly the map we want for $\:e\cat\:e'$. \end{proofo} This is the same notion of concatenation that appeared in the original pomset model, which in turn was inspired by that for trace theory. That is, $P\cat Q$ is the set of all possibilities involving a schedule of $P$ concatenated with a schedule of $Q$. One may now notice a problem in applying this to the case of prefix ordered processes: the map on the right side of diagram \ref{eq:2scheds}, i.~e., the one mapping $\:0\conc\:I_{\%B}\to\:I_{\%B}\cat\:I_{\%B}$, is {\em not} a prefix map. That there is a problem should not actually be surprising, since it is with the prefix ordering structure that we make any kind of connection between the alternative ordering and the temporal constraints.
Consider that when we advance from an alternative $p$ to a later alternative $p'$, the new events of $Pp'$ will necessarily occur after the ones that have already appeared in $Pp$. But when forming the process concatenation $P\cat Q$, one would hope to capture the sense in which nothing happens in process $Q$ until after $P$ is ``finished.'' In our context, given alternatives $p<p'$ in $A_P$ and $q<q'$ in $A_Q$, the pair $\<p,q'>$ is a later stage of $A_{P\cat Q}$ at which we can arrive by ``doing something'' on $Q$. The fact that the $Q$ portion of the process $P\cat Q$ is now active suggests that we should not then be able to have $\<p,q'>\le\<p',q'>$, i.~e., to be able to do something on $P$. In general, having $A_{P\cat Q}$ be isomorphic to $A_{P\times Q}$ turns out not to be appropriate. The immediate problem of $\:0\conc\:I_{\%B}\to\:I_{\%B}\cat\:I_{\%B}$ not being a prefix map is solved by using a different $2$, one in which the troublesome ordering $\:\OI\le\:\II$ is simply not present in $A_2$. This can easily be done since we only need to have $A_2$ mapping into $A_{I_{\bot}\times I_{\bot}}$; they do not need to be isomorphic. Then we can take the underlying schedule maps to be \begin{equation} \setlength{\unitlength}{0.0088in} \begin{picture}(0,175)(140,680) \thicklines \put( 80,795){\oval(60,30)} \put(200,795){\oval(60,30)} \put( 80,730){\oval(60,30)} \put(200,730){\oval(60,30)} \put(110,730){\vector( 1, 0){ 60}} \put( 80,780){\vector( 0,-1){ 35}} \put(110,795){\vector( 1, 0){ 60}} \put(190,735){\circle*{5}} \put( 80,735){\circle*{5}} \put(210,795){\circle*{5}} \put(210,725){\circle*{5}} \thinlines \put( 80,735){\vector( 1, 0){105}} \multiput(190,735)(4,-2){6}{\makebox(0.6333,0.9500){\sevrm .}} \put( 50,757){\makebox(0,0)[rb]{\raisebox{0pt}[0pt][0pt]{$2$:}}} \put( 80,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0_{\%B}\|\:0_{\%B}\cong \:0_{\%B}$}}} \put( 80,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\|\:0_{\%B}\cong \:I_{\%B}$}}} \put(200,820){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:0\|\:I_{\%B}\cong \:I_{\%B}$}}} \put(200,695){\makebox(0,0)[b]{\raisebox{0pt}[0pt][0pt]{$\:I_{\%B}\cat \:I_{\%B}$}}} \end{picture} \end{equation} and carry through the previous concatenation construction as before. Consider, for example, the concatenation of two processes $P$ and $Q$, each with a linear set of alternatives, the first alternative of each pointing to the initial (empty) schedule. Using the original version of $2$ and ignoring the prefix ordering requirement, i.~e., working in \PROC{\%B}, the alternative set $A_{P\cat Q}$ of the concatenation is exactly like that of the concurrence $A_{P\times Q}$.
\begin{equation} \setlength{\unitlength}{0.0088in} \begin{picture}(140,95)(30,720) \thicklines \put(195,740){\makebox(0,0)[l]{$A_{P\cat Q}=A_P\times A_Q$}} \put( 70,770){\vector( 0,-1){30}} \put( 70,740){\vector( 0,-1){30}} \put(100,770){\vector( 0,-1){30}} \put(100,740){\vector( 0,-1){30}} \put(130,770){\vector( 0,-1){30}} \put(130,740){\vector( 0,-1){30}} \put(160,770){\vector( 0,-1){30}} \put(160,740){\vector( 0,-1){30}} \put(190,770){\vector( 0,-1){30}} \put(190,740){\vector( 0,-1){30}} \put( 70,710){\vector( 1, 0){30}} \put(100,710){\vector( 1, 0){30}} \put(130,710){\vector( 1, 0){30}} \put(160,710){\vector( 1, 0){30}} \put( 70,740){\vector( 1, 0){30}} \put(100,740){\vector( 1, 0){30}} \put(130,740){\vector( 1, 0){30}} \put(160,740){\vector( 1, 0){30}} \put( 70,770){\vector( 1, 0){30}} \put(100,770){\vector( 1, 0){30}} \put(130,770){\vector( 1, 0){30}} \put(160,770){\vector( 1, 0){30}} \put( 25,740){\makebox(0,0)[r]{$A_P$}} \put( 30,740){\vector( 0,-1){ 30}} \put( 30,770){\vector( 0,-1){ 30}} \put(130,820){\makebox(0,0)[b]{$A_Q$}} \put(160,810){\vector( 1, 0){ 30}} \put(130,810){\vector( 1, 0){ 30}} \put(100,810){\vector( 1, 0){ 30}} \put( 70,810){\vector( 1, 0){ 30}} \put( 70,800){\makebox(0,0)[b]{$\bot_Q$}} \end{picture} \end{equation} Using the revised $2$ and working in the prefix ordered category \PROCK{\%B}, an argument similar to that of Proposition \ref{prop:concatstr} shows that underlying sets of $A_{P\cat Q}$ and $A_{P\times Q}$ are the same, though we can no longer expect the ordering to be the same as well. The underlying object map of $A_{P\cat Q}$ and $A_{P\times Q}$ is thus fixed. That $P\cat Q$ is at least a preconcatenation requires that for every pair of maps $(e:P\to I_{\bot},e':Q\to I_{\bot})$, we have a map $P\cat Q\to 2$, the monotonicity of which dictates that certain orderings not be present in $A_{P\cat Q}$. So for any particular $(e, e')$ pair we can deduce the maximal order that $A_{P\cat Q}$ can have to allow for a map into $A_2$ \begin{equation} \setlength{\unitlength}{0.0088in} \begin{picture}(140,100)(30,720) \thicklines \put(195,740){\makebox(0,0)[l]{$A_{P\cat Q}=A_P\times A_Q$}} \put( 70,770){\vector( 0,-1){30}} \put( 70,740){\vector( 0,-1){30}} \put(100,770){\vector( 0,-1){30}} \put(100,740){\vector( 0,-1){30}} \put(130,740){\vector( 0,-1){30}} \put(160,740){\vector( 0,-1){30}} \put(190,740){\vector( 0,-1){30}} \put( 70,710){\vector( 1, 0){30}} \put(100,710){\vector( 1, 0){30}} \put(130,710){\vector( 1, 0){30}} \put(160,710){\vector( 1, 0){30}} \put( 70,740){\vector( 1, 0){30}} \put(100,740){\vector( 1, 0){30}} \put(130,740){\vector( 1, 0){30}} \put(160,740){\vector( 1, 0){30}} \put( 70,770){\vector( 1, 0){30}} \put(100,770){\vector( 1, 0){30}} \put(130,770){\vector( 1, 0){30}} \put(160,770){\vector( 1, 0){30}} \put( 25,740){\makebox(0,0)[r]{$A_P$}} \put( 30,770){\vector( 0,-1){ 30}} \put( 30,740){\vector( 0,-1){ 30}} \put( 30,740){\circle{10}} \put( 30,710){\circle{10}} \put(130,825){\makebox(0,0)[b]{$A_Q$}} \put(160,810){\vector( 1, 0){ 30}} \put(130,810){\vector( 1, 0){ 30}} \put(100,810){\vector( 1, 0){ 30}} \put( 70,810){\vector( 1, 0){ 30}} \put( 70,800){\makebox(0,0)[br]{$\bot_Q$}} \put(130,810){\circle{10}} \put(160,810){\circle{10}} \put(190,810){\circle{10}} \end{picture} \end{equation} \noindent in which we placed circles on those alternatives of $A_P$ and $A_Q$ that are sent by $A_e$ (resp. $A_{e'}$) to the noninitial alternative $\:I$ of $I_{\bot}$. Note that if $A_ep=\:I$, then $A_ep'=\:I$ for all $p'\ge p$ by functoriality. 
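The constraint just described can also be stated operationally. The following sketch (ad hoc names, finite data, and no claim to being the categorical construction; alternatives of the linear example are represented by integers) computes, for one choice of $(e,e')$, which orderings between pairs of alternatives survive:
\begin{verbatim}
-- markedP p says whether A_e sends p to the noninitial alternative of
-- I_bot; markedQ does the same for A_e'.  Both are assumed upward closed.
-- An ordering (p,q) <= (p',q') must be dropped exactly when it would be
-- sent to the ordering <bot,top> <= <top,top>, which is absent from A_2.
allowed :: (Ord a, Ord b)
        => (a -> Bool) -> (b -> Bool)    -- markedP, markedQ
        -> (a, b) -> (a, b) -> Bool
allowed markedP markedQ (p, q) (p', q') =
  p <= p' && q <= q'
  && not (not (markedP p) && markedP p' && markedQ q)
\end{verbatim}
Taking the conjunction of {\tt allowed} over all choices of $(e,e')$ yields the ordering pictured next.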
Combining all of the constraints on the $A_{P\cat Q}$ ordering imposed by all of the possible choices for $e$ and $e'$, we obtain the actual ordering for $A_{P\cat Q}$ \begin{equation} \setlength{\unitlength}{0.0088in} \begin{picture}(140,100)(30,720) \thicklines \put(195,740){\makebox(0,0)[l]{$A_{P\cat Q}$}} \put( 70,770){\vector( 0,-1){30}} \put( 70,740){\vector( 0,-1){30}} \put( 70,710){\vector( 1, 0){30}} \put(100,710){\vector( 1, 0){30}} \put(130,710){\vector( 1, 0){30}} \put(160,710){\vector( 1, 0){30}} \put( 70,740){\vector( 1, 0){30}} \put(100,740){\vector( 1, 0){30}} \put(130,740){\vector( 1, 0){30}} \put(160,740){\vector( 1, 0){30}} \put( 70,770){\vector( 1, 0){30}} \put(100,770){\vector( 1, 0){30}} \put(130,770){\vector( 1, 0){30}} \put(160,770){\vector( 1, 0){30}} \put( 25,740){\makebox(0,0)[r]{$A_P$}} \put( 30,740){\vector( 0,-1){ 30}} \put( 30,770){\vector( 0,-1){ 30}} \put( 30,775){\makebox(0,0)[b]{$\bot_P$}} \put(130,820){\makebox(0,0)[b]{$A_Q$}} \put(160,810){\vector( 1, 0){ 30}} \put(130,810){\vector( 1, 0){ 30}} \put(100,810){\vector( 1, 0){ 30}} \put( 70,810){\vector( 1, 0){ 30}} \put( 70,800){\makebox(0,0)[br]{$\bot_Q$}} \end{picture} \end{equation} \noindent though we omit the proof that this alternative order together with the usual schedule concatenations constitutes the actual process $P\cat Q$ in \PROCK{\%B}. Notice that this ordering of $A_{P\cat Q}$ manages to omit exactly those pairs of alternatives for which the corresponding schedule map is {\em not} a prefix. In the branch\-ing-time process category \PROCB{\%B}, since we know that there exists a construction of concatenation as a limit, this limit will in fact be a branch\-ing-time process if its components are. For the concatenation construction to be usable in the category of closed processes \PROCBC{\%B}, it suffices that schedule concatenation, itself a colimit, commute with the taking of filtered colimits, but this follows from colimits commuting with colimits in general. \section{Iterated operations} Finally, we have to contend with the class of iterated operations. One such is the Kleene star or iterated concatenation, $P^{*}$, in which the alternatives are sequences of alternatives from $P$ while the schedules are the corresponding schedule concatenations. Analogously, we also have the iterated concurrence $P^{\dagger}$ as introduced in \cite{Pr84}. These, among others, can be constructed by taking the fixpoint of a suitable process functor. In general, it will be useful to have criteria by which one may know that these fixpoints exist. We start by stating a slight generalization of a result of Plotkin and Smyth \cite{PS78}. \begin{theorem}\label{th:fix} Given two categories \%C, \%D, the latter having $\omega$-colimits and an initial object $0$, along with a functor $F:\%C\times\%D\to\%D$ preserving $\omega$-colimits, there exists an initial fixpoint $F^{\omega}:\%C\to\%D$. That is, we have \begin{itemize} \item a functor $F^{\omega}:\%C\to\%D$ and \item a canonical natural isomorphism $\tau:F\<\id_{\%C},F^{\omega}>\nato F^{\omega}$ that is initial in the category of pre-fixpoints of $F$ (a pre-fixpoint being a pair $(G,\gamma)$ in which $G:\%C\to\%D$ is a functor and $\gamma:F\<\id_{\%C},G>\nato G$).
\end{itemize} Furthermore, if {\%C} has $\omega$-colimits, then $F^{\omega}$ preserves them. \end{theorem} \begin{proofo} We can fix an object $A$ of \%C, in which case the proof follows the same line as in \cite{PS78}, that is, we take the colimit of $$0\to F(A,0)\to F(A,F(A,0))\to F(A,F(A,F(A,0)))\to\cdots$$ to obtain $F^{\omega}A$ and a canonical isomorphism $\tau_A:F(A,F^{\omega}A)\to F^{\omega}A$. For any morphism $f:A\to B$, we have a natural transformation between the respective diagrams on $A$ and $B$ which produces a morphism $F^{\omega}f:F^{\omega}A\to F^{\omega}B$. The final statement is a consequence of the uniqueness of colimits in $\%D$. \end{proofo} \vbox {Actually, we will be using the dual result: \addtocounter{theorem}{-1} \begin{theorem}[dual] Given two categories \%C, \%D, the latter having $\omega$-limits and a terminal object $1$, along with a functor $F:\%C\times\%D\to\%D$ preserving $\omega$-limits, there exists a fixpoint functor $F^{\omega}:\%C\to\%D$ with a canonical natural isomorphism $\tau:F^{\omega}\nato F\<\id_{\%C},F^{\omega}>$. Furthermore, if {\%C} has $\omega$-limits, then $F^{\omega}$ preserves them. \end{theorem}} We use this to define the iterated concurrence operation ${}^\dagger:\PROC{\%B}\to\PROC{\%B}$ (see \cite{Pr86}) by taking the fixpoint of \begin{eqnarray*} 1+(\pi_1\|\pi_2):\PROC{\%B}\times\PROC{\%B}&\to&\PROC{\%B}\cr (P,Q)&\mapsto& 1+(P\|Q)\cr \end{eqnarray*} That this functor preserves $\omega$-limits is a matter of the functors $1+{-}$ and ${-}\|{-}$ preserving $\omega$-limits. By Theorem \ref{th:fix}, we then have a functor \begin{eqnarray*} {}^\dagger:\PROC{\%B}&\to&\PROC{\%B}\cr P&\mapsto&P^\dagger\cr \end{eqnarray*} along with a canonical morphism $P^\dagger\to 1+(P\|P^\dagger)$ and the fact that ${}^\dagger$ also preserves $\omega$-limits. In a similar manner, for a particular process $P$ we can define the iterated concatenation $P^*$ as the fixpoint of $$1+(P\cat\pi_2):\PROC{\%B}\to\PROC{\%B}$$ which also provides a canonical morphism $P^*\to 1+(P\cat P^*)$. Since the $\cat$ functor only preserves filtered colimits in its second argument, this is the best we can do. This construction is applicable in \PROCK{\%B}, \PROCB{\%B}, and \PROCBC{\%B} as well. However, in \PROCBC{\%B}, unlike in the other cases, we have the closure property: a closed process containing a particular ordered (a special case of filtered) sequence of schedules must also contain the colimit schedule as an alternative. We have all finite concatenations of schedules from $P$ as before, but now we also have all infinite concatenations of schedules from $P$. \chapter{Example: Dataflow Networks}\label{chapter:dflow} We now present an example based on a fairly well-known model, the dataflow model of Kahn \cite{Ka74}. The particular result that we derive has been obtained before, both by Rabinovich \cite{Rab} for pomset semantics and by Gaifman and Pratt \cite{GP} for the dual-order prosset model. The difference here is that we use the limit semantics for system composition introduced above, and the temporal model is sufficiently abstract to allow for the use of arbitrary {\%D\hPhys} temporal structures (e.g., intervals). Informally, we consider a collection of {\em processors}, each of which has as its sole means of communication with the outside world a set of {\em ports}. Essentially, the processor reacts to data items appearing on any of its input ports, possibly transmitting data items on one or more of its output ports.
A dataflow network consists of a set of processors together with a set of transmission {\em channels} linking them. One may distinguish between channels that are purely {\em input} channels, i.e., that connect only input ports and those channels which are driven by an output port on some processor. Data items that are broadcast on a given channel are then seen on the input ports to which that channel connects. Channels are sequential in the sense that only one data item may be broadcast at a time. Taken collectively the network itself may be viewed as a processor in the previous sense. The set of input ports of the processor is the set of input channels of the network while the output ports are some designated set of channels driven by particular processors, the remaining channels becoming {\em internal} channels which are not seen by the outside world. \section{Kahn's Semantics} Kahn's result concerns a network of processors that ``compute functions.'' This is formalized as follows. The channels carry data items drawn from some alphabet $\Sigma$, which may be different for different channels. We describe what happens on a particular channel during some computation via its history, which will be a (possibly infinite) string on the alphabet $\Sigma$, i.~e., some element of $\Sigma^\leomega$, a cpo which we take to be ordered by prefix. The behavior of processor $i$ is described by a monotone continuous (i.~e., directed-join preserving) {\em function} relating the output string to the given input strings. For example, we could have a network of 3 processors \begin{equation} \setlength{\unitlength}{0.0088in} \begin{picture}(613,171)(55,630) \thinlines \put( 80,775){\vector( 1, 0){ 85}} \put(585,765){\vector( 1, 0){ 65}} \put(425,715){\line( 1, 0){ 50}} \put(475,715){\line( 0,-1){ 75}} \put(475,640){\line(-1, 0){365}} \put(110,640){\line( 0, 1){115}} \put(110,755){\vector( 1, 0){ 55}} \put(390,705){\vector( 3, 1){135}} \put(250,765){\vector( 3,-2){ 90}} \put(225,765){\vector( 1, 0){300}} \thicklines \put(165,745){\framebox(60,40){}} \put(340,680){\framebox(50,40){}} \put(525,735){\framebox(60,45){}} \put( 55,770){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$X$}}} \put(425,695){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$Y$}}} \put(285,775){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$Z$}}} \put(660,760){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$W$}}} \put(185,760){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$F_1$}}} \put(355,695){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$F_2$}}} \put(545,752){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$F_3$}}} \end{picture} \end{equation} \noindent whose behaviors are described by the functions \begin{eqnarray*} F_1:\Sigma_X^\leomega\times\Sigma_Y^\leomega&\to&\Sigma_Z^\leomega \\ F_2:\Sigma_Z^\leomega&\to&\Sigma_Y^\leomega \\ F_3:\Sigma_Y^\leomega\times\Sigma_Z^\leomega&\to&\Sigma_W^\leomega \end{eqnarray*} Kahn's main result is that, by taking the least fixpoint of the system \begin{eqnarray*} Z&=&F_1(X,Y) \\ Y&=&F_2(Z) \\ W&=&F_3(Y,Z) \end{eqnarray*} we get a function $\overline{F}:\Sigma_X^\leomega\to\Sigma_W^\leomega$ which describes the behavior of the network as a whole with respect to the input $X$ and the output $W$. \section{Extending the Kahn Model} That the relation between the input histories and the output histories be a functional one for each of the processors is critical to the applicability of Kahn's theorem. 
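Kahn's least-fixpoint semantics is easy to mimic directly with lazy streams, which may help fix intuitions before turning to the process-level treatment. In the sketch below the wiring is that of the three-processor network above, but the function bodies are placeholders chosen only so that the recursion is productive; they are not meant to be any particular $F_i$:
\begin{verbatim}
-- Histories as (finite or infinite) lazy lists.  The mutually recursive
-- bindings compute the least fixpoint of the system
--     Z = F1(X,Y),  Y = F2(Z),  W = F3(Y,Z).
net :: [Int] -> [Int]
net x = w
  where
    z = zipWith (+) x (1 : y)   -- F1: consumes X and a delayed copy of Y
    y = map (2 *) z             -- F2
    w = zipWith (+) y z         -- F3
-- For example, net [1,2,3] evaluates to [6,18,45].
\end{verbatim}
Continuity of the $F_i$ is what makes such a recursion well behaved: each output symbol depends on only a finite portion of the inputs, so the finite approximations converge to the least fixpoint $\overline{F}$.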
Brock and Ackerman \cite{BA77} discuss anomalies that occur when one considers networks with one or more processors having internal nondeterminism in their behavior. One of the motivations of the original pomset model was to provide an extension of Kahn's model to treat situations with nondeterminism. One of the usual questions with such extensions is whether they are {\em conservative} in the sense of obtaining matching results in those situations in which the Kahn model is applicable. Rabinovich \cite{Rab} and Gaifman and Pratt \cite{GP} answered this in the affirmative, given a particular representation of dataflow processors in terms of pomset processes and a particular notion of composition of processes. Our goal here is to emulate the Gaifman-Pratt result, which in our terms, applies to processes constructed over the schedule category \_3\hPhys, except that we will use our notion of composition of process specifications via categorical limit, rather than the {\em fusion} described in \cite{GP}, and the result will apply to $\_D\hPhys$ for arbitrary choice of temporal domain \_D. In our framework, we view each processor as a component and, moreover, we view each channel as a component as well. Thus, the above network becomes a diagram \begin{equation}\label{pict:pnet} \setlength{\unitlength}{0.0088in} \begin{picture}(356,176)(100,585) \thinlines \put(450,730){\vector( 0,-1){105}} \put(110,725){\vector( 0,-1){110}} \put(430,725){\vector(-2,-1){210}} \put(120,730){\vector( 2,-1){210}} \put(445,730){\vector(-3,-4){ 75}} \put(280,725){\vector( 2,-3){ 60}} \put(280,725){\vector(-3,-4){ 75}} \put(115,730){\vector( 2,-3){ 70}} \put(100,740){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P_1$}}} \put(265,740){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P_2$}}} \put(435,740){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$P_3$}}} \put(100,595){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^S_X^\leomega$}}} \put(195,600){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^S_Y^\leomega$}}} \put(340,605){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^S_Z^\leomega$}}} \put(440,600){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\^S_W^\leomega$}}} \end{picture} \end{equation} \noindent where each of the $P_i$'s is a process that ``computes the function $F_i$'' in the sense to be described below. Our contention is that our notion of system composition via taking the limit of this diagram matches the least fixpoint semantics of Kahn, that is, taking the limit of this diagram yields a process $P$ with morphisms $P\to\Sigma_X^\leomega$ and $P\to\Sigma_W^\leomega$ such that $P$ computes the function $\overline{F}$. \subsection{Input Locations} For the purposes of this chapter we will be working within the category of closed branching-time coherent processes (\CPROCBC{\%B}). In addition we need to be able to have temporal constraints forcing events to strictly precede other events, and to prohibit morphisms from identifying such events that are in strict precedence. Thus we need to have our underlying temporal domain be some $\%D\hPhys$. Recall that we say an event $x$ {\em strictly precedes} another event $y$ (write $xn$. Otherwise, $a_n$ is a prefix of $F(a_n,b)$ and we append the $(n+1)^{\rm th}$ symbol of $F(a_n,b)$ to $a_n$ to obtain $a_{n+1}$. $p_{n+1}$ is then the extension of $p_n$ for which $(A_f\times A_g)p_{n+1}=(a_{n+1},b)$ obtained via $f\times g$ being an input morphism. Note that the usual sequence one uses to construct $\Fbar$ via induction (i.e., $F(\bot,b)$, $F(F(\bot,b),b)$, $F(F(F(\bot,b)))$, etc.) 
is a subsequence of the $a_n$'s. Thus, $\bigvee a_n=\Fbar(b)=a$. We then let $p=\bigvee p_n$ in $A_P$, and by the continuity of $f$ and $h$, $A_fp=A_hp=a$. It now remains to show that the desired coequalizer exists. Let $r\to p$ be the coequalizer in ($\%D\hCat\comma\Set$); recall that this latter category is cocomplete. If $r$ is actually in the subcategory ($\%D\hPhys\comma\Set$), i.e., if its events satisfy the criterion $\delta(x,x)=I$, then we are done. Since the prefix of a coequalizer is itself a coequalizer, it suffices to inductively verify this criterion for all of the prefixes $r_n$ of $r$, where $r_n\to p_n$ is the coequalizer of $f_{p_n},h_{p_m}i_{nm}:\Gleo(a_n)\to P(p_n)$ (where $m$ is such that $F(a_n,b)=a_m$; $i_{nm}$ is the prefix injection). That is, one verifies, for all events $x$ of $r_n$ not already in $r_{n-1}$, that $\delta_r(x,x)=I$. Due to the way one constructs colimits in $\%D\hCat$ for {\%D} a preorder, this can fail only if there is a closed path in $p_n$ of the form $y=y_0,y_1,\ldots,y_{2k+1}=y$ where $y_{2j-1}=\:f_{{\eighttt p}_n}z_j$ and $\:h_{{\eighttt p}_m}z_j=y_{2j}$ ($z_j$ all in $\Gleo(a_n)$) such that $$\delta(y_0,y_1)\otimes\delta(y_2,y_3)\otimes\ldots\otimes\delta(y_{2k},y_{2k+1})> I$$ One then shows, for any event $y$ in $p_n$ not already in $p_{n-1}$, that such a loop does not exist, on the assumption that there are no such loops in $p_{n-1}$. The possibilities for loops in $p_n$ are as follows. The case $n=0$ is trivial since $a_0$ has no events. For $n=1$, use condition~\ref{inp:early} in the definition of input morphism. For $n>1$, use condition~\ref{inp:strict}, that successive events $z,z'$ of $\Gleo(a_n)$ have $\delta(z,z')=I$ and that $f$ is a restriction (so that $\delta(\:f_{{\eighttt p}_n}z,\:f_{{\eighttt p}_n}z')=I$ as well). Having shown that $R$ has alternatives for each of the elements of the graph of $\Fbar$, it only remains to show that $g\tau:R\to X$ is an input morphism. This can be left as an exercise, as the arguments involved are not too dissimilar from what has gone before. \end{proofo} To go from this to a general result is a matter of considering that for any given system of functional equations, the least fixpoint we obtain is the same whether we compute it in stages, progressively identifying one pair of variables at a time, or identify them all at once. Likewise, in any diagram of processes like (\ref{pict:pnet}), we obtain the same result whether we compute the limit all at once or substitute equalizers \nobreak piecemeal. \chapter{Directions for Future Work}\label{chapter:conc} \subsubsection{Conjunctive Normal Form and Event Structures} It has already been noted that our process specifications are essentially presented in ``disjunctive normal form''. Can one devise any corresponding ``conjunctive normal form''? This would be a model where the temporal relationships (rather than the choices) now form the outer structure, the disjunctive aspects being handled by some substructures where the events used to be. We consider a somewhat looser sense of CNF, taking a particular transformation one may perform on any prefix-ordered process $P$ to obtain a form of annotated schedule. The first observation is that the constituent behaviors, together with the morphisms relating them, form a diagram of type $A_P$. In the case of a prefix-ordered process, prefix ordering guarantees that this diagram has a colimit.
Now, as a matter of terminology, for each event $e$ of the colimit and each of the alternatives $a$ in $A_P$, we say that $e$ is {\em present} in $a$ iff the image of the schedule $P(a)$ under the injection into the colimit schedule $E_P$ contains $e$. The subset of all alternatives of $A_P$ in which $e$ is present we denote $\^f_P(e)$. Since the process is prefix ordered, an event present in $a$ will also be present in all ``later'' alternatives $a'$, i.~e., all alternatives $a'$ for which $a\le a'$; thus $\^f_P(e)$ is necessarily a {\em filter} (upward-closed subset) of $A_P$. Essentially, we obtain a schedule $E_P$ together with an annotation $\^f_P:E_P\to\%F(A_P)$ of the schedule that indicates which subsets of events may collectively occur in any given scenario. In the case of a partial order temporal structure, this looks very much like one of Winskel's {\em prime event structures} \cite{Win86}. \begin{definition} A prime event structure is a partially ordered set $(E,{\le})$ together with a {\em consistency} predicate $\^z$ on finite subsets of $E$ satisfying \begin{eqnarray} &&\{e'|e'\le e\}\hbox{ is finite}\label{es:finite}\\ &&\{e\}\in\^z\\ Y\subseteq X\land X\in\^z&\Longrightarrow&Y\in\^z\\ X\in\^z\land\exists e'\in X:e\le e'&\Longrightarrow&X\cup\{e\}\in\^z \end{eqnarray} for all $e,e'\in E$ and all finite subsets $X,Y$ of $E$. \end{definition} If one is willing to ignore the property (\ref{es:finite}), one may crudely derive an ``event structure'' from a prefix-closed process $P$ in $\PROCK{\_\Pos_{<}}$ by constructing the event set $E_P$ via a colimit as described above and then defining $\^z_P$ via \begin{equation} X\in\^z_P\iff\bigcap_{e\in X}\^f_P(e)\ne\emptyset \end{equation} that is, a finite subset $X\subseteq E_P$ of events is {\em consistent} iff there exists some alternative $a$ in which they are all present. This reduction, however, loses information since, without any further conditions on the structure of $A_P$, there is, in general, no way to rederive the individual schedules $P(a)$ comprising the colimit schedule $E_P$. Also from Winskel: \begin{definition} Given a prime event structure $(E,{\le},\^z)$, a subset of $E$ is {\em consistent} iff all of its finite subsets are in $\^z$. A subset $D$ is {\em left-closed} iff $e'\le e\land e\in D\implies e'\in D$ for all $e,e'$ in $E$. \end{definition} The set of all left-closed, consistent subsets of $E$ we denote $\_L(E)$, which can in turn be ordered by inclusion. Clearly, $\_L(E)$ will include all of the original schedules\footnote{Actually, their images under the colimit injection into $E$.} comprising $P$, but it will also include all {\em prefixes} of those schedules. There may also be some orderings in $\_L(E)$ that were not present in the original $A_P$. One way out is to impose further restrictions on the structure of the process $P$, i.~e., to somehow stipulate that for every alternative $a$ of $A_P$, and every possible distinct prefix $\:p'$ of the schedule $P(a)$, there exists a unique alternative $a'\le a$ such that $P(a')=\:p'$. This is effectively what Rabinovich and Trakhtenbrot do in their work on {\em configuration structures} \cite{RT}. To formalize this in our framework, we first take note of a correspondence Winskel derives between prime event structures and {\em finitary prime algebraic domains}.
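Before turning to that correspondence, the reduction just described can be made concrete for finite data. The following sketch uses ad hoc names and makes no attempt to retain the information noted above as lost:
\begin{verbatim}
import Data.List (intersect, subsequences)

-- phi e lists the alternatives in which the event e is present (a filter
-- of A_P); a finite set of events is consistent iff some alternative
-- contains them all.
consistent :: Eq a => [a] -> (e -> [a]) -> [e] -> Bool
consistent allAlts phi = not . null . foldr (intersect . phi) allAlts

-- The left-closed consistent subsets of a finite event structure, i.e.,
-- the elements of L(E), given the order and a consistency predicate.
configs :: Eq e => [e] -> (e -> e -> Bool) -> ([e] -> Bool) -> [[e]]
configs es leq con =
  [ xs | xs <- subsequences es
       , con xs
       , and [ e' `elem` xs | e <- xs, e' <- es, e' `leq` e ] ]
\end{verbatim}
For finite $E$, ordering the output of {\tt configs} by inclusion recovers the partial order $(\_L(E),\subseteq)$.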
First we need a rapid review of some domain theory terminology: \begin{definition} Given a partial order $(D,\sqsubseteq)$, \begin{itemize} \item A subset $D'\subseteq D$ is {\em finitely compatible} iff each of its finite subsets $X$ has a least upper bound $\bigsqcup X$ in $D$. \item $D$ is {\em consistently complete} iff all of its finitely compatible subsets have least upper bounds. \item An element $f\in D$ is {\em finite} iff for all directed subsets $S$ of $D$, $f\sqsubseteq\bigsqcup S\implies f\sqsubseteq s$ for some $s\in S$. \item $D$ is {\em algebraic} iff for every element $d\in D$, we have $d=\bigsqcup\{f\sqsubseteq d|f\hbox{ is finite}\}$. \item For any element $d\in D$, the {\em left closure}, $\lfloor d\rfloor$, is the set $\{e|e\sqsubseteq d\}$. \item $D$ is {\em finitary} iff $\lfloor f\rfloor$ is finite for all finite $f$ \item An element $p$ is a {\em complete prime} of $D$ iff for any subset $D'$ with a least upper bound, $p\sqsubseteq\bigsqcup D'\implies \exists d\in D':p\sqsubseteq d$. \item $D$ is {\em prime algebraic} iff for every element $d\in D$, we have\hfil\break $d=\bigsqcup\{p\sqsubseteq d|p\hbox{ is a complete prime}\}$. \end{itemize} \end{definition} Now, for any finitary prime algebraic domain $(D,\sqsubseteq)$, we can take the set of its primes $\_\Pr(D)$ ordered by $\sqsubseteq$, and define a consistency predicate $\^z_{\_\Pr(D)}$ that contains all sets of primes having a least upper bound, at which point Winskel then proves (see \cite{Win86}) \begin{theorem} \begin{itemize} \item For any finitary prime algebraic domain $(D,\sqsubseteq)$, we have that $(\_\Pr(D),\sqsubseteq,\^z_{\_\Pr(D)})$ is a prime event structure, and $(\_L\/\_\Pr(D),\subseteq)$ and $(D,\sqsubseteq)$ are isomorphic as partial orders. \item For any prime event structure $(E,{\le},\^z)$, $(\_L(E),\subseteq)$ is a finitary prime algebraic domain whose complete primes are the subsets $\lfloor e \rfloor$ for all $e\in E$ (and so $\_\Pr\/\_L(E)$ and $E$ are likewise isomorphic). \end{itemize} \end{theorem} The essential point is that we can stipulate a class of processes $\%P_{pc}[\_2\hCat]$ which we call {\em prefix-closed} in which we require the alternative set $A_P$ to be a finitary prime algebraic domain, and for all alternatives $a\in A_P$ either \begin{itemize} \item $a$ is a complete prime and $P(a)$ contains exactly one event $e$ that is not in the image of any other $P(a')$ and is maximal in $P(a)$. \item $a$ is not a complete prime and all events in $P(a)$ are present in previous alternatives. \end{itemize} This class of processes, a subclass of branching-time processes is so severely restricted that building the colimit of the schedules, tossing away the alternative set and remembering only the consistency predicate loses no information; one can reconstruct the original process. Unfortunately, the restrictions are so severe that we cannot take advantage of any flexibility in choice of temporal domain \%D; all of the schedules perforce look like partial order schedules. Clearly, the more annotation we keep on our events when converting a process to an annotated schedule, the fewer requirements that need to be placed on the structure of the process in order that this conversion be bijective. The question of what annotation suffices, e.g., for branching-time or closed branching-time processes is an open one. 
Presumably this would yield some form of ``live'' event structure that would be able to express the notion of more than one event being forced to occur at a given stage, thus facilitating the discussion of liveness properties. One area for possible improvement includes finding less restrictive notions of prefix. The current prefix notion for {\_R\hPhys} stipulates that if there is any upper bound at all from event $x$ to event $y$ and $x$ is part of a prefix, so is $y$. This actually makes sense from the point of view that if alternatives are merely ordered, one can say nothing about the timing of decisions. If $y$ is to be not in the prefix but part of the next stage, then $y$ must be allowed to occur an arbitrarily long time after $x$ since the decision to advance to the next stage may be indefinitely postponed. This suggests instead exploring some form of further enrichment for the alternative set, one which would not only provide orderings between alternatives, but also give time limits on the decisions between them. \appendix \bibliographystyle{plain} \begin{thebibliography}{10} \bibitem{BCSW} R.~Betti, A.~Carboni, R.~Street, and R.~Walters. \newblock Variation through enrichment. \newblock {\em J. Pure and Applied Algebra}, 29:109--127, 1983. \bibitem{BA77} J.D. Brock and W.B. Ackerman. \newblock An anomaly in the specifications of nondeterministic packet systems. \newblock Technical Report Computation Structures Group Note CSG-33, MIT Lab. for Computer Science, November 1977. \bibitem{Cas91} Ross Casley. \newblock {\em On the Specification of Concurrent Systems}. \newblock PhD thesis, Stanford University, January 1991. \bibitem{CCMP} R.T Casley, R.F. Crew, J.~Meseguer, and V.R. Pratt. \newblock Temporal structures. \newblock In {\em Proc. Conf. on Category Theory and Computer Science, LNCS 389}, Manchester, September 1989. Springer-Verlag. \newblock Revised version to appear in Math. Structures in Comp. Sci., {\bf 1}:2, 1991. \bibitem{Flo62} R.W. Floyd. \newblock Algorithm 97: shortest path. \newblock {\em Communications of the ACM}, 5(6):345, 1962. \bibitem{GP} H.~Gaifman and V.R. Pratt. \newblock Partial order models of concurrency and the computation of functions. \newblock In {\em Proc. 2nd Annual IEEE Symp. on Logic in Computer Science}, pages 72--85, Ithaca, NY, June 1987. \bibitem{Gra} J.~Grabowski. \newblock On partial languages. \newblock {\em Fundamenta Informaticae}, IV.2:427--498, 1981. \bibitem{Gre} I.~Greif. \newblock {\em Semantics of Communicating Parallel Processes}. \newblock PhD thesis, Project MAC report TR-154, MIT, 1975. \bibitem{Joh82} P.T. Johnstone. \newblock {\em Stone Spaces}. \newblock Cambridge University Press, 1982. \bibitem{J83} P.T. Johnstone. \newblock Fibred categories. \newblock Lecture notes, University of Cambridge, 1983. \bibitem{Ka74} G.~Kahn. \newblock The semantics of a simple language for parallel programming. \newblock In {\em Proc. IFIP Congress 74}. North-Holland, Amsterdam, 1974. \bibitem{Kel} G.M. Kelly. \newblock {\em Basic Concepts of Enriched Category Theory: London Math. Soc. Lecture Notes}. \newblock 64. Cambridge University Press, 1982. \bibitem{Mac} S.~Mac{ }Lane. \newblock {\em Categories for the Working Mathematician}. \newblock Springer-Verlag, 1971. \bibitem{MS} U.~Montanari and C.~Simonelli. \newblock On distinguishing between concurrency and nondeterminism. \newblock In {\em Proc. {Ecole} de {Printemps} on Concurrency and {Petri} Nets}, Colleville, 1980. \bibitem{NPW} M.~Nielsen, G.~Plotkin, and G.~Winskel. 
\newblock Petri nets, event structures, and domains, part {I}. \newblock {\em Theoretical Computer Science}, 13, 1981. \bibitem{PW} S.S. Pinter and P.~Wolper. \newblock A temporal logic to reason about partially ordered computations. \newblock In {\em Proc. 3rd ACM Symp. on Principles of Distributed Computing}, pages 28--37, Vancouver, August 1984. \bibitem{PS78} G.D. Plotkin and M.B. Smyth. \newblock The category-theoretic solution of recursive domain equations. \newblock Technical Report Research Report No. 60, D.A.I., 1978. \bibitem{Pr82} V.R. Pratt. \newblock On the composition of processes. \newblock In {\em Proceedings of the Ninth Annual ACM Symposium on Principles of Programming Languages}, January 1982. \bibitem{Pr84} V.R. Pratt. \newblock The pomset model of parallel processes: {Unifying} the temporal and the spatial. \newblock In {\em Proc. CMU/SERC Workshop on Analysis of Concurrency, LNCS 197}, pages 180--196, Pittsburgh, 1984. Springer-Verlag. \bibitem{Pr86} V.R. Pratt. \newblock Modeling concurrency with partial orders. \newblock {\em Int. J. of Parallel Programming}, 15(1):33--71, February 1986. \bibitem{Pr89a} V.R. Pratt. \newblock Enriched categories and the {Floyd-Warshall} connection. \newblock In {\em Proc. First International Conference on Algebraic Methodology and Software Technology}, pages 177--180, Iowa City, May 1989. \bibitem{Rab} A.~Rabinovich. \newblock Pomset semantics is consistent with dataflow semantics. \newblock Technical report, Tel-Aviv University, 1986. \bibitem{RT} A.~Rabinovich and B.A. Trakhtenbrot. \newblock Behavior structures and nets. \newblock {\em Fundamenta Informatica}, 11(4):357--404, 1988. \bibitem{War62} S.~Warshall. \newblock A theorem on {B}oolean matrices. \newblock {\em Journal of the ACM}, 9(1):11--12, 1962. \bibitem{Win86} G.~Winskel. \newblock Event structures: Lecture notes for the advanced course on {Petri} nets. \newblock Technical report, University of Cambridge, July 1986. \bibitem{Win88} G.~Winskel. \newblock A category of labelled {Petri} nets and compositional proof system. \newblock In {\em Proc. 3rd Annual Symposium on Logic in Computer Science}, Edinburgh, 1988. Computer Society Press. \end{thebibliography} \end{document}